Diffstat (limited to 'kernel')
-rw-r--r--	kernel/trace/trace.c	40
-rw-r--r--	kernel/trace/trace.h	2
2 files changed, 5 insertions, 37 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 9a931c7c2da3..ce8ceb8aea6a 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -99,7 +99,6 @@ notrace cycle_t ftrace_now(int cpu)
 	return time;
 }
 
-static atomic_t tracer_counter;
 static struct trace_array	global_trace;
 
 static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
@@ -661,7 +660,6 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
 
 	pc = preempt_count();
 
-	entry->idx		= atomic_inc_return(&tracer_counter);
 	entry->preempt_count	= pc & 0xff;
 	entry->pid		= tsk->pid;
 	entry->t		= ftrace_now(raw_smp_processor_id());
@@ -757,8 +755,10 @@ find_next_entry(struct trace_iterator *iter, int *ent_cpu)
 		if (!head_page(tr->data[cpu]))
 			continue;
 		ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu);
-		if (ent &&
-		    (!next || (long)(next->idx - ent->idx) > 0)) {
+		/*
+		 * Pick the entry with the smallest timestamp:
+		 */
+		if (ent && (!next || ent->t < next->t)) {
 			next = ent;
 			next_cpu = cpu;
 		}
@@ -800,8 +800,6 @@ trace_consume(struct trace_iterator *iter)
 	if (data->trace_head == data->trace_tail &&
 	    data->trace_head_idx == data->trace_tail_idx)
 		data->trace_idx = 0;
-
-	trace_iterator_increment(iter);
 }
 
 static notrace void *
@@ -1160,33 +1158,6 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
 	}
 }
 
-static notrace void sync_time_offset(struct trace_iterator *iter)
-{
-	struct trace_array_cpu *prev_array, *array;
-	struct trace_entry *prev_entry, *entry;
-	cycle_t prev_t, t;
-
-	entry = iter->ent;
-	prev_entry = iter->prev_ent;
-	if (!prev_entry)
-		return;
-
-	prev_array = iter->tr->data[iter->prev_cpu];
-	array = iter->tr->data[iter->cpu];
-
-	prev_t = prev_entry->t + prev_array->time_offset;
-	t = entry->t + array->time_offset;
-
-	/*
-	 * If time goes backwards we increase the offset of
-	 * the current array, to not have observable time warps.
-	 * This will quickly synchronize the time offsets of
-	 * multiple CPUs:
-	 */
-	if (t < prev_t)
-		array->time_offset += prev_t - t;
-}
-
 static notrace int
 print_trace_fmt(struct trace_iterator *iter)
 {
@@ -1200,12 +1171,11 @@ print_trace_fmt(struct trace_iterator *iter)
 	int S;
 	int ret;
 
-	sync_time_offset(iter);
 	entry = iter->ent;
 
 	comm = trace_find_cmdline(iter->ent->pid);
 
-	t = ns2usecs(entry->t + iter->tr->data[iter->cpu]->time_offset);
+	t = ns2usecs(entry->t);
 	usec_rem = do_div(t, 1000000ULL);
 	secs = (unsigned long)t;
 
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 30cad677e9d0..27fa2d06f499 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -38,7 +38,6 @@ struct trace_entry {
 	char			preempt_count;
 	int			pid;
 	cycle_t			t;
-	unsigned long		idx;
 	union {
 		struct ftrace_entry		fn;
 		struct ctx_switch_entry		ctx;
@@ -57,7 +56,6 @@ struct trace_array_cpu {
 	atomic_t	disabled;
 	spinlock_t	lock;
 	struct lock_class_key	lock_key;
-	cycle_t		time_offset;
 
 	/* these fields get copied into max-trace: */
 	unsigned	trace_head_idx;
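
The core of this patch is the change in find_next_entry(): instead of stamping every entry with a global atomic counter (tracer_counter / entry->idx) and reconciling per-CPU clocks with sync_time_offset(), the iterator now simply merges the per-CPU buffers by picking the pending entry with the smallest timestamp. Below is a minimal standalone sketch of that merge idea, not kernel code; all names (sample_entry, cpu_buf, pick_next) are invented for illustration, and the data layout is deliberately simplified to plain arrays.

/*
 * Sketch: merge per-CPU entry buffers in timestamp order,
 * mirroring the "pick the entry with the smallest timestamp"
 * comparison introduced in find_next_entry() above.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct sample_entry {
	uint64_t t;	/* timestamp in ns, analogous to trace_entry::t */
	int	 pid;
};

struct cpu_buf {
	struct sample_entry	*entries;
	size_t			 head;	/* index of next unread entry */
	size_t			 len;
};

/* Return the buffer holding the oldest pending entry, or NULL if all drained. */
static struct cpu_buf *pick_next(struct cpu_buf *bufs, int nr_cpus)
{
	struct cpu_buf *best = NULL;

	for (int cpu = 0; cpu < nr_cpus; cpu++) {
		struct cpu_buf *b = &bufs[cpu];

		if (b->head >= b->len)
			continue;	/* this CPU's buffer is empty */

		/* Pick the entry with the smallest timestamp: */
		if (!best ||
		    b->entries[b->head].t < best->entries[best->head].t)
			best = b;
	}
	return best;
}

int main(void)
{
	struct sample_entry cpu0[] = { { 100, 1 }, { 300, 1 } };
	struct sample_entry cpu1[] = { { 200, 2 } };
	struct cpu_buf bufs[] = {
		{ cpu0, 0, sizeof(cpu0) / sizeof(cpu0[0]) },
		{ cpu1, 0, sizeof(cpu1) / sizeof(cpu1[0]) },
	};
	struct cpu_buf *b;

	/* Prints entries in global timestamp order: t=100, t=200, t=300. */
	while ((b = pick_next(bufs, 2))) {
		struct sample_entry *e = &b->entries[b->head++];

		printf("t=%llu pid=%d\n", (unsigned long long)e->t, e->pid);
	}
	return 0;
}

The sketch assumes the timestamps from different CPUs are directly comparable, which is exactly what lets the patch drop the per-CPU time_offset field and the sync_time_offset() fixup.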