path: root/kernel/trace
author	Steven Rostedt <srostedt@redhat.com>	2009-09-09 10:36:01 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2009-09-09 23:54:04 -0400
commit	478142c39c8c2f5f63038e5f2224e6729406e587
tree	222d3fc2fd8142299140d05206ada65b653e4a33	/kernel/trace
parent	d8eeb2d3b26d25c44c10f28430e2157a2d20bd1d
tracing: do not grab lock in wakeup latency function tracing
The wakeup tracer, when enabled, has its own function tracer. It only traces the functions on the CPU that the task it is following is running on. If a task is woken on one CPU but then migrates to another CPU before it wakes up, the latency tracer will start tracing functions on the other CPU.

To find which CPU the task is on, the wakeup function tracer performs a task_cpu(wakeup_task). But to make sure the task does not disappear it grabs the wakeup_lock, which is also taken when the task wakes up. By taking this lock, the function tracer does not need to worry about the task being freed while it checks its CPU.

Jan Blunck found a problem with this approach on his 32 CPU box. When a task is being traced by the wakeup tracer, all functions take this lock. That means that on all 32 CPUs, each function call takes this one lock to see if the task is on that CPU. This one lock serializes all functions on all 32 CPUs. Needless to say, this caused major issues on that box; it would even lock up.

This patch changes the wakeup latency tracer to insert a probe on the migrate task tracepoint. When a task changes the CPU it will run on, the probe takes note. The wakeup function tracer no longer needs to take the lock; it only compares the current CPU with a variable that holds the CPU the task is currently on. We don't worry about races, since it is OK to add or miss a function trace.

Reported-by: Jan Blunck <jblunck@suse.de>
Tested-by: Jan Blunck <jblunck@suse.de>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
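In outline, the old function-trace callback took the global wakeup_lock on every call, on every CPU, just to read task_cpu(wakeup_task); the new one only compares raw_smp_processor_id() against a cached wakeup_current_cpu that a probe on the sched_migrate_task tracepoint keeps current. A simplified before/after sketch, not the literal code: the tr/data/flags/pc tracer plumbing and the preempt/irq handling of the real function are left out for brevity, and only the identifiers that appear in the patch below are real:

/* Before: every traced function on every CPU serializes on one lock. */
static void wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	int cpu = raw_smp_processor_id();

	__raw_spin_lock(&wakeup_lock);
	if (wakeup_task && task_cpu(wakeup_task) == cpu)
		trace_function(tr, ip, parent_ip, flags, pc);
	__raw_spin_unlock(&wakeup_lock);
}

/* After: a tracepoint probe caches the traced task's CPU ... */
static void probe_wakeup_migrate_task(struct task_struct *task, int cpu)
{
	if (task != wakeup_task)
		return;
	wakeup_current_cpu = cpu;
}

/* ... and the callback becomes a lock-free compare; a racing
 * migration can only add or drop a single function trace. */
static void wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	if (raw_smp_processor_id() != wakeup_current_cpu)
		return;
	trace_function(tr, ip, parent_ip, flags, pc);
}

The probe itself is registered with register_trace_sched_migrate_task() when the tracer starts and unregistered when it stops, as the last two hunks of the diff show.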
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/trace_sched_wakeup.c	36
1 files changed, 22 insertions, 14 deletions
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index ad69f105a7c6..cf43bdb1763a 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -24,6 +24,7 @@ static int __read_mostly tracer_enabled;
 
 static struct task_struct *wakeup_task;
 static int wakeup_cpu;
+static int wakeup_current_cpu;
 static unsigned wakeup_prio = -1;
 static int wakeup_rt;
 
@@ -56,33 +57,23 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
 	resched = ftrace_preempt_disable();
 
 	cpu = raw_smp_processor_id();
+	if (cpu != wakeup_current_cpu)
+		goto out_enable;
+
 	data = tr->data[cpu];
 	disabled = atomic_inc_return(&data->disabled);
 	if (unlikely(disabled != 1))
 		goto out;
 
 	local_irq_save(flags);
-	__raw_spin_lock(&wakeup_lock);
-
-	if (unlikely(!wakeup_task))
-		goto unlock;
-
-	/*
-	 * The task can't disappear because it needs to
-	 * wake up first, and we have the wakeup_lock.
-	 */
-	if (task_cpu(wakeup_task) != cpu)
-		goto unlock;
 
 	trace_function(tr, ip, parent_ip, flags, pc);
 
- unlock:
-	__raw_spin_unlock(&wakeup_lock);
 	local_irq_restore(flags);
 
  out:
 	atomic_dec(&data->disabled);
-
+ out_enable:
 	ftrace_preempt_enable(resched);
 }
 
@@ -107,6 +98,14 @@ static int report_latency(cycle_t delta)
 	return 1;
 }
 
+static void probe_wakeup_migrate_task(struct task_struct *task, int cpu)
+{
+	if (task != wakeup_task)
+		return;
+
+	wakeup_current_cpu = cpu;
+}
+
 static void notrace
 probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
 	struct task_struct *next)
@@ -244,6 +243,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success)
 	__wakeup_reset(wakeup_trace);
 
 	wakeup_cpu = task_cpu(p);
+	wakeup_current_cpu = wakeup_cpu;
 	wakeup_prio = p->prio;
 
 	wakeup_task = p;
@@ -293,6 +293,13 @@ static void start_wakeup_tracer(struct trace_array *tr)
 		goto fail_deprobe_wake_new;
 	}
 
+	ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task);
+	if (ret) {
+		pr_info("wakeup trace: Couldn't activate tracepoint"
+			" probe to kernel_sched_migrate_task\n");
+		return;
+	}
+
 	wakeup_reset(tr);
 
 	/*
@@ -325,6 +332,7 @@ static void stop_wakeup_tracer(struct trace_array *tr)
 	unregister_trace_sched_switch(probe_wakeup_sched_switch);
 	unregister_trace_sched_wakeup_new(probe_wakeup);
 	unregister_trace_sched_wakeup(probe_wakeup);
+	unregister_trace_sched_migrate_task(probe_wakeup_migrate_task);
 }
 
 static int __wakeup_tracer_init(struct trace_array *tr)