Diffstat (limited to 'kernel/trace/trace_clock.c')
 kernel/trace/trace_clock.c | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 20c5f92e28a8..84a3a7ba072a 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -20,6 +20,8 @@
 #include <linux/ktime.h>
 #include <linux/trace_clock.h>
 
+#include "trace.h"
+
 /*
  * trace_clock_local(): the simplest and least coherent tracing clock.
  *
@@ -28,17 +30,17 @@
  */
 u64 notrace trace_clock_local(void)
 {
-	unsigned long flags;
 	u64 clock;
+	int resched;
 
 	/*
 	 * sched_clock() is an architecture implemented, fast, scalable,
 	 * lockless clock. It is not guaranteed to be coherent across
 	 * CPUs, nor across CPU idle events.
 	 */
-	raw_local_irq_save(flags);
+	resched = ftrace_preempt_disable();
 	clock = sched_clock();
-	raw_local_irq_restore(flags);
+	ftrace_preempt_enable(resched);
 
 	return clock;
 }
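
The first two hunks belong together: sched_clock() only needs preemption held off, not interrupts, so the IRQ save/restore pair is replaced by ftrace's recursion-safe preemption helpers, and the new #include "trace.h" pulls in their definitions. For reference, this is roughly what those helpers look like in kernel/trace/trace.h of this era (a sketch reconstructed from the surrounding kernel version, not part of this patch):

	static inline int ftrace_preempt_disable(void)
	{
		int resched;

		/* remember whether a reschedule was already pending */
		resched = need_resched();
		/* _notrace variant so the tracer does not trace itself */
		preempt_disable_notrace();

		return resched;
	}

	static inline void ftrace_preempt_enable(int resched)
	{
		if (resched)
			/*
			 * NEED_RESCHED was set before preemption was
			 * disabled; re-enable without calling into the
			 * scheduler, since the tracer may be running
			 * inside scheduler code already.
			 */
			preempt_enable_no_resched_notrace();
		else
			preempt_enable_notrace();
	}

The resched handshake is the point: a plain preempt_enable() could recurse into the scheduler from a tracing callsite, while the IRQ-disabling it replaces was simply more expensive than the clock read requires.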
@@ -69,10 +71,10 @@ u64 notrace trace_clock(void)
 /* keep prev_time and lock in the same cacheline. */
 static struct {
 	u64 prev_time;
-	raw_spinlock_t lock;
+	arch_spinlock_t lock;
 } trace_clock_struct ____cacheline_aligned_in_smp =
 	{
-		.lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED,
+		.lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
 	};
 
 u64 notrace trace_clock_global(void)
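
The remaining hunks are part of the tree-wide locking rename in which the old lowest-level raw_spinlock_t became arch_spinlock_t, freeing the raw_* names for the non-sleeping spinlock layer. An arch spinlock does no preemption or IRQ bookkeeping of its own; the caller must disable those first, exactly as trace_clock_global() does with raw_local_irq_save(). A minimal usage sketch under that assumption (demo_lock and demo_read are illustrative names, not kernel code):

	static arch_spinlock_t demo_lock =
		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	static u64 demo_read(u64 *shared)
	{
		unsigned long flags;
		u64 val;

		raw_local_irq_save(flags);	/* arch_spin_lock() will not do this */
		arch_spin_lock(&demo_lock);
		val = *shared;
		arch_spin_unlock(&demo_lock);
		raw_local_irq_restore(flags);

		return val;
	}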
@@ -92,7 +94,7 @@ u64 notrace trace_clock_global(void)
 	if (unlikely(in_nmi()))
 		goto out;
 
-	__raw_spin_lock(&trace_clock_struct.lock);
+	arch_spin_lock(&trace_clock_struct.lock);
 
 	/*
 	 * TODO: if this happens often then maybe we should reset
@@ -104,7 +106,7 @@ u64 notrace trace_clock_global(void)
 
 	trace_clock_struct.prev_time = now;
 
-	__raw_spin_unlock(&trace_clock_struct.lock);
+	arch_spin_unlock(&trace_clock_struct.lock);
 
  out:
 	raw_local_irq_restore(flags);
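
Putting the last three hunks together, the locking shape of trace_clock_global() is roughly the skeleton below, reconstructed from the context lines above; the exact clock read and the monotonicity clamp in the middle are assumptions inferred from the visible prev_time update and TODO comment. The in_nmi() early-out exists because an NMI can fire while this CPU already holds the lock, and taking it again would deadlock, so NMI context settles for the lockless per-CPU reading.

	u64 notrace trace_clock_global(void)
	{
		unsigned long flags;
		u64 now;

		raw_local_irq_save(flags);

		now = sched_clock();	/* per-CPU, possibly incoherent */

		/* an NMI may have interrupted a holder of the lock on
		 * this CPU; retaking it would deadlock, so stay lockless */
		if (unlikely(in_nmi()))
			goto out;

		arch_spin_lock(&trace_clock_struct.lock);

		/* clamp to the last value handed out globally, so the
		 * clock never appears to run backwards across CPUs */
		if ((s64)(now - trace_clock_struct.prev_time) < 0)
			now = trace_clock_struct.prev_time + 1;

		trace_clock_struct.prev_time = now;

		arch_spin_unlock(&trace_clock_struct.lock);

	 out:
		raw_local_irq_restore(flags);

		return now;
	}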
