diff options
-rw-r--r-- | kernel/trace/ftrace.c | 4 | ||||
-rw-r--r-- | kernel/trace/trace.c | 7 | ||||
-rw-r--r-- | kernel/trace/trace.h | 5 | ||||
-rw-r--r-- | kernel/trace/trace_functions.c | 2 | ||||
-rw-r--r-- | kernel/trace/trace_irqsoff.c | 6 | ||||
-rw-r--r-- | kernel/trace/trace_sched_switch.c | 2 | ||||
-rw-r--r-- | kernel/trace/trace_sched_wakeup.c | 4 |
7 files changed, 16 insertions(+), 14 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 97c40865a93e..a15e068535f8 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -531,7 +531,7 @@ static int notrace __ftrace_update_code(void *ignore) | |||
531 | save_ftrace_enabled = ftrace_enabled; | 531 | save_ftrace_enabled = ftrace_enabled; |
532 | ftrace_enabled = 0; | 532 | ftrace_enabled = 0; |
533 | 533 | ||
534 | start = now(raw_smp_processor_id()); | 534 | start = ftrace_now(raw_smp_processor_id()); |
535 | ftrace_update_cnt = 0; | 535 | ftrace_update_cnt = 0; |
536 | 536 | ||
537 | /* No locks needed, the machine is stopped! */ | 537 | /* No locks needed, the machine is stopped! */ |
@@ -550,7 +550,7 @@ static int notrace __ftrace_update_code(void *ignore) | |||
550 | 550 | ||
551 | } | 551 | } |
552 | 552 | ||
553 | stop = now(raw_smp_processor_id()); | 553 | stop = ftrace_now(raw_smp_processor_id()); |
554 | ftrace_update_time = stop - start; | 554 | ftrace_update_time = stop - start; |
555 | ftrace_update_tot_cnt += ftrace_update_cnt; | 555 | ftrace_update_tot_cnt += ftrace_update_cnt; |
556 | 556 | ||
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 4550afda9607..e3778ab0d3f7 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -42,6 +42,11 @@ ns2usecs(cycle_t nsec) | |||
42 | return nsec; | 42 | return nsec; |
43 | } | 43 | } |
44 | 44 | ||
45 | notrace cycle_t ftrace_now(int cpu) | ||
46 | { | ||
47 | return cpu_clock(cpu); | ||
48 | } | ||
49 | |||
45 | static atomic_t tracer_counter; | 50 | static atomic_t tracer_counter; |
46 | static struct trace_array global_trace; | 51 | static struct trace_array global_trace; |
47 | 52 | ||
@@ -607,7 +612,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags) | |||
607 | entry->idx = atomic_inc_return(&tracer_counter); | 612 | entry->idx = atomic_inc_return(&tracer_counter); |
608 | entry->preempt_count = pc & 0xff; | 613 | entry->preempt_count = pc & 0xff; |
609 | entry->pid = tsk->pid; | 614 | entry->pid = tsk->pid; |
610 | entry->t = now(raw_smp_processor_id()); | 615 | entry->t = ftrace_now(raw_smp_processor_id()); |
611 | entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | | 616 | entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | |
612 | ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | | 617 | ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | |
613 | ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | | 618 | ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | |
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index b0408356f0e0..30cad677e9d0 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -171,10 +171,7 @@ void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu); | |||
171 | void update_max_tr_single(struct trace_array *tr, | 171 | void update_max_tr_single(struct trace_array *tr, |
172 | struct task_struct *tsk, int cpu); | 172 | struct task_struct *tsk, int cpu); |
173 | 173 | ||
174 | static inline notrace cycle_t now(int cpu) | 174 | extern notrace cycle_t ftrace_now(int cpu); |
175 | { | ||
176 | return cpu_clock(cpu); | ||
177 | } | ||
178 | 175 | ||
179 | #ifdef CONFIG_SCHED_TRACER | 176 | #ifdef CONFIG_SCHED_TRACER |
180 | extern void notrace | 177 | extern void notrace |
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index 5d8ad7a09605..e5d34b78fc99 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c | |||
@@ -20,7 +20,7 @@ static notrace void function_reset(struct trace_array *tr) | |||
20 | { | 20 | { |
21 | int cpu; | 21 | int cpu; |
22 | 22 | ||
23 | tr->time_start = now(tr->cpu); | 23 | tr->time_start = ftrace_now(tr->cpu); |
24 | 24 | ||
25 | for_each_online_cpu(cpu) | 25 | for_each_online_cpu(cpu) |
26 | tracing_reset(tr->data[cpu]); | 26 | tracing_reset(tr->data[cpu]); |
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 2dfebb67fdfb..d2a6e6f1ad2d 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c | |||
@@ -136,7 +136,7 @@ check_critical_timing(struct trace_array *tr, | |||
136 | * as long as possible: | 136 | * as long as possible: |
137 | */ | 137 | */ |
138 | T0 = data->preempt_timestamp; | 138 | T0 = data->preempt_timestamp; |
139 | T1 = now(cpu); | 139 | T1 = ftrace_now(cpu); |
140 | delta = T1-T0; | 140 | delta = T1-T0; |
141 | 141 | ||
142 | local_save_flags(flags); | 142 | local_save_flags(flags); |
@@ -186,7 +186,7 @@ out_unlock: | |||
186 | 186 | ||
187 | out: | 187 | out: |
188 | data->critical_sequence = max_sequence; | 188 | data->critical_sequence = max_sequence; |
189 | data->preempt_timestamp = now(cpu); | 189 | data->preempt_timestamp = ftrace_now(cpu); |
190 | tracing_reset(data); | 190 | tracing_reset(data); |
191 | ftrace(tr, data, CALLER_ADDR0, parent_ip, flags); | 191 | ftrace(tr, data, CALLER_ADDR0, parent_ip, flags); |
192 | } | 192 | } |
@@ -215,7 +215,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip) | |||
215 | atomic_inc(&data->disabled); | 215 | atomic_inc(&data->disabled); |
216 | 216 | ||
217 | data->critical_sequence = max_sequence; | 217 | data->critical_sequence = max_sequence; |
218 | data->preempt_timestamp = now(cpu); | 218 | data->preempt_timestamp = ftrace_now(cpu); |
219 | data->critical_start = parent_ip ? : ip; | 219 | data->critical_start = parent_ip ? : ip; |
220 | tracing_reset(data); | 220 | tracing_reset(data); |
221 | 221 | ||
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c index 6c9284103a62..8d656672da93 100644 --- a/kernel/trace/trace_sched_switch.c +++ b/kernel/trace/trace_sched_switch.c | |||
@@ -61,7 +61,7 @@ static notrace void sched_switch_reset(struct trace_array *tr) | |||
61 | { | 61 | { |
62 | int cpu; | 62 | int cpu; |
63 | 63 | ||
64 | tr->time_start = now(tr->cpu); | 64 | tr->time_start = ftrace_now(tr->cpu); |
65 | 65 | ||
66 | for_each_online_cpu(cpu) | 66 | for_each_online_cpu(cpu) |
67 | tracing_reset(tr->data[cpu]); | 67 | tracing_reset(tr->data[cpu]); |
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 688df965f3f2..b7df825c3af9 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c | |||
@@ -92,7 +92,7 @@ wakeup_sched_switch(struct task_struct *prev, struct task_struct *next) | |||
92 | * as long as possible: | 92 | * as long as possible: |
93 | */ | 93 | */ |
94 | T0 = data->preempt_timestamp; | 94 | T0 = data->preempt_timestamp; |
95 | T1 = now(cpu); | 95 | T1 = ftrace_now(cpu); |
96 | delta = T1-T0; | 96 | delta = T1-T0; |
97 | 97 | ||
98 | if (!report_latency(delta)) | 98 | if (!report_latency(delta)) |
@@ -191,7 +191,7 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p, | |||
191 | 191 | ||
192 | local_save_flags(flags); | 192 | local_save_flags(flags); |
193 | 193 | ||
194 | tr->data[wakeup_cpu]->preempt_timestamp = now(cpu); | 194 | tr->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu); |
195 | ftrace(tr, tr->data[wakeup_cpu], CALLER_ADDR1, CALLER_ADDR2, flags); | 195 | ftrace(tr, tr->data[wakeup_cpu], CALLER_ADDR1, CALLER_ADDR2, flags); |
196 | 196 | ||
197 | out_locked: | 197 | out_locked: |