author		Steven Rostedt <rostedt@goodmis.org>	2008-10-01 13:14:09 -0400
committer	Ingo Molnar <mingo@elte.hu>		2008-10-14 04:39:09 -0400
commit		38697053fa006411224a1790e2adb8216440ab0f (patch)
tree		30daab3a6ba93f1c8c922397ffe8a0d6a220e8b1 /kernel/trace/trace_sched_wakeup.c
parent		e4c2ce82ca2710e17cb4df8eb2b249fa2eb5af30 (diff)
ftrace: preempt disable over interrupt disable
With the new ring buffer infrastructure in ftrace, I'm trying to make
ftrace a little more lightweight.

This patch converts a lot of the local_irq_save/restore calls into
preempt_disable/enable. In many cases the original preempt count now has
to be passed in as a parameter so that it can be recorded correctly;
some places were recording it incorrectly before anyway.

This also lays the groundwork for making ftrace a little more reentrant
and for removing all locking. The function tracers must still protect
against reentrancy.
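For illustration, a hedged sketch of the new calling convention (not code
copied from this patch; tr, data and flags stand in for whatever the call
site already has): the preempt count is sampled before the tracer disables
preemption itself, so the recorded value is that of the traced context.

	int pc;

	pc = preempt_count();	/* count of the traced context, not the tracer */
	...
	trace_function(tr, data, ip, parent_ip, flags, pc);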
Note: All function tracers must be careful when using preempt_disable;
they must do the following:
	resched = need_resched();
	preempt_disable_notrace();
	[...]
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
The reason is that if this function traces schedule() itself, the
preempt_enable_notrace() can trigger a schedule, which leads to a
recursive failure.

If we needed to reschedule before calling preempt_disable, we should
have already scheduled. Since we did not, we most likely should not
reschedule now and are probably inside a schedule function ourselves.

If resched was not set, we still need to catch the need_resched flag
being set while preemption was off; the if case at the end catches that
for us.
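Putting this together, a minimal sketch of a callback following the rule
above (simplified from wakeup_tracer_call() in the diff below;
my_tracer_call, tr, data and flags are placeholder names, and the locking
and per-cpu disabled bookkeeping are omitted):

	static void
	my_tracer_call(unsigned long ip, unsigned long parent_ip)
	{
		int resched, pc;

		pc = preempt_count();	/* count of the traced context */
		resched = need_resched();
		preempt_disable_notrace();

		trace_function(tr, data, ip, parent_ip, flags, pc);

		/* never reschedule here; we may be inside schedule() itself */
		if (resched)
			preempt_enable_no_resched_notrace();
		else
			preempt_enable_notrace();
	}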
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace/trace_sched_wakeup.c')

 kernel/trace/trace_sched_wakeup.c | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 01e75e0639b7..fe4a252c2363 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -44,10 +44,12 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
 	long disabled;
 	int resched;
 	int cpu;
+	int pc;
 
 	if (likely(!wakeup_task))
 		return;
 
+	pc = preempt_count();
 	resched = need_resched();
 	preempt_disable_notrace();
 
@@ -70,7 +72,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
 	if (task_cpu(wakeup_task) != cpu)
 		goto unlock;
 
-	trace_function(tr, data, ip, parent_ip, flags);
+	trace_function(tr, data, ip, parent_ip, flags, pc);
 
 unlock:
 	__raw_spin_unlock(&wakeup_lock);
@@ -121,6 +123,7 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
 	unsigned long flags;
 	long disabled;
 	int cpu;
+	int pc;
 
 	tracing_record_cmdline(prev);
 
@@ -139,6 +142,8 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
 	if (next != wakeup_task)
 		return;
 
+	pc = preempt_count();
+
 	/* The task we are waiting for is waking up */
 	data = wakeup_trace->data[wakeup_cpu];
 
@@ -155,7 +160,7 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
 	if (unlikely(!tracer_enabled || next != wakeup_task))
 		goto out_unlock;
 
-	trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2, flags);
+	trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
 
 	/*
 	 * usecs conversion is slow so we try to delay the conversion
@@ -220,6 +225,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p)
 	int cpu = smp_processor_id();
 	unsigned long flags;
 	long disabled;
+	int pc;
 
 	if (likely(!tracer_enabled))
 		return;
@@ -232,6 +238,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p)
 	    p->prio >= current->prio)
 		return;
 
+	pc = preempt_count();
 	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
 	if (unlikely(disabled != 1))
 		goto out;
@@ -256,7 +263,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p)
 
 	wakeup_trace->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
 	trace_function(wakeup_trace, wakeup_trace->data[wakeup_cpu],
-		       CALLER_ADDR1, CALLER_ADDR2, flags);
+		       CALLER_ADDR1, CALLER_ADDR2, flags, pc);
 
 out_locked:
 	__raw_spin_unlock(&wakeup_lock);