about summary refs log tree commit diff stats
path: root/kernel/trace
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/trace_irqsoff.c      | 12
-rw-r--r--  kernel/trace/trace_sched_wakeup.c | 10
2 files changed, 1 insertion(+), 21 deletions(-)
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 5555b75a0d12..06f8ea9e4b9d 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -129,15 +129,10 @@ check_critical_timing(struct trace_array *tr,
129 unsigned long parent_ip, 129 unsigned long parent_ip,
130 int cpu) 130 int cpu)
131{ 131{
132 unsigned long latency, t0, t1;
133 cycle_t T0, T1, delta; 132 cycle_t T0, T1, delta;
134 unsigned long flags; 133 unsigned long flags;
135 int pc; 134 int pc;
136 135
137 /*
138 * usecs conversion is slow so we try to delay the conversion
139 * as long as possible:
140 */
141 T0 = data->preempt_timestamp; 136 T0 = data->preempt_timestamp;
142 T1 = ftrace_now(cpu); 137 T1 = ftrace_now(cpu);
143 delta = T1-T0; 138 delta = T1-T0;
@@ -157,17 +152,12 @@ check_critical_timing(struct trace_array *tr,
157 152
158 trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc); 153 trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
159 154
160 latency = nsecs_to_usecs(delta);
161
162 if (data->critical_sequence != max_sequence) 155 if (data->critical_sequence != max_sequence)
163 goto out_unlock; 156 goto out_unlock;
164 157
165 tracing_max_latency = delta;
166 t0 = nsecs_to_usecs(T0);
167 t1 = nsecs_to_usecs(T1);
168
169 data->critical_end = parent_ip; 158 data->critical_end = parent_ip;
170 159
160 tracing_max_latency = delta;
171 update_max_tr_single(tr, current, cpu); 161 update_max_tr_single(tr, current, cpu);
172 162
173 max_sequence++; 163 max_sequence++;
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index cf43bdb1763a..6e1529bc6172 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -110,7 +110,6 @@ static void notrace
110probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev, 110probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
111 struct task_struct *next) 111 struct task_struct *next)
112{ 112{
113 unsigned long latency = 0, t0 = 0, t1 = 0;
114 struct trace_array_cpu *data; 113 struct trace_array_cpu *data;
115 cycle_t T0, T1, delta; 114 cycle_t T0, T1, delta;
116 unsigned long flags; 115 unsigned long flags;
@@ -156,10 +155,6 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
156 trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc); 155 trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
157 tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc); 156 tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
158 157
159 /*
160 * usecs conversion is slow so we try to delay the conversion
161 * as long as possible:
162 */
163 T0 = data->preempt_timestamp; 158 T0 = data->preempt_timestamp;
164 T1 = ftrace_now(cpu); 159 T1 = ftrace_now(cpu);
165 delta = T1-T0; 160 delta = T1-T0;
@@ -167,12 +162,7 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
167 if (!report_latency(delta)) 162 if (!report_latency(delta))
168 goto out_unlock; 163 goto out_unlock;
169 164
170 latency = nsecs_to_usecs(delta);
171
172 tracing_max_latency = delta; 165 tracing_max_latency = delta;
173 t0 = nsecs_to_usecs(T0);
174 t1 = nsecs_to_usecs(T1);
175
176 update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu); 166 update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
177 167
178out_unlock: 168out_unlock: