Diffstat (limited to 'kernel/trace/trace_sched_switch.c')
-rw-r--r-- | kernel/trace/trace_sched_switch.c | 9
1 file changed, 6 insertions, 3 deletions
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index e0b06db0f7af..c7fa08a5b7f4 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -26,6 +26,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
 	unsigned long flags;
 	long disabled;
 	int cpu;
+	int pc;
 
 	if (!atomic_read(&sched_ref))
 		return;
@@ -36,13 +37,14 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
 	if (!tracer_enabled)
 		return;
 
+	pc = preempt_count();
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	data = ctx_trace->data[cpu];
 	disabled = atomic_inc_return(&data->disabled);
 
 	if (likely(disabled == 1))
-		tracing_sched_switch_trace(ctx_trace, data, prev, next, flags);
+		tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc);
 
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
@@ -54,11 +56,12 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
 	struct trace_array_cpu *data;
 	unsigned long flags;
 	long disabled;
-	int cpu;
+	int cpu, pc;
 
 	if (!likely(tracer_enabled))
 		return;
 
+	pc = preempt_count();
 	tracing_record_cmdline(current);
 
 	local_irq_save(flags);
@@ -68,7 +71,7 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
 
 	if (likely(disabled == 1))
 		tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
-					   flags);
+					   flags, pc);
 
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
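For context: the new pc argument carries the value of preempt_count() sampled at the moment the probe fires, so the tracer can record the preemption depth of the traced context alongside the saved irq flags for each sched_switch/wakeup event, instead of re-reading it deeper in the tracer where its own bookkeeping may be in effect. Below is a minimal, hypothetical sketch of how a trace-entry helper could fold that (flags, pc) pair into a per-event header; the struct layout, field names, and helper name are illustrative assumptions, not the actual definitions behind tracing_sched_switch_trace()/tracing_sched_wakeup_trace().

/*
 * Hypothetical illustration only: one way a tracer-side helper might
 * consume the (flags, pc) pair these probes now pass down. The entry
 * layout and helper are assumptions for this sketch, not the kernel's
 * real trace entry format.
 */
#include <stdint.h>

struct sketch_trace_entry {
	int32_t pid;		/* task that generated the event */
	uint8_t preempt_count;	/* low byte of preempt_count() at event time */
	uint8_t irqs_off;	/* non-zero if irqs were disabled at the probe */
};

static void sketch_entry_update(struct sketch_trace_entry *entry,
				int irqs_disabled, int pc, int pid)
{
	entry->pid = pid;
	/* The low byte is enough to show nesting depth in trace output. */
	entry->preempt_count = (uint8_t)(pc & 0xff);
	entry->irqs_off = irqs_disabled ? 1 : 0;
}

A recorded depth like this is what typically shows up as the preempt-depth column in latency-format trace output; the sketch only shows where such a value could land, not how this commit's callees actually store it.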