author      Andrea Bastoni <bastoni@cs.unc.edu>    2010-05-30 19:16:45 -0400
committer   Andrea Bastoni <bastoni@cs.unc.edu>    2010-05-30 19:16:45 -0400
commit      ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree        644b88f8a71896307d71438e9b3af49126ffb22b /kernel/trace/trace_clock.c
parent      43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent      3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)

Merge branch 'wip-2.6.34' into old-private-master (archived-private-master)
Diffstat (limited to 'kernel/trace/trace_clock.c')

-rw-r--r--    kernel/trace/trace_clock.c    21

1 files changed, 12 insertions, 9 deletions
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 20c5f92e28a8..9d589d8dcd1a 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -13,6 +13,7 @@
  * Tracer plugins will chose a default from these clocks.
  */
 #include <linux/spinlock.h>
+#include <linux/irqflags.h>
 #include <linux/hardirq.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
@@ -20,6 +21,8 @@
 #include <linux/ktime.h>
 #include <linux/trace_clock.h>
 
+#include "trace.h"
+
 /*
  * trace_clock_local(): the simplest and least coherent tracing clock.
  *
@@ -28,17 +31,17 @@
  */
 u64 notrace trace_clock_local(void)
 {
-        unsigned long flags;
         u64 clock;
+        int resched;
 
         /*
          * sched_clock() is an architecture implemented, fast, scalable,
          * lockless clock. It is not guaranteed to be coherent across
          * CPUs, nor across CPU idle events.
          */
-        raw_local_irq_save(flags);
+        resched = ftrace_preempt_disable();
         clock = sched_clock();
-        raw_local_irq_restore(flags);
+        ftrace_preempt_enable(resched);
 
         return clock;
 }
@@ -69,10 +72,10 @@ u64 notrace trace_clock(void)
 /* keep prev_time and lock in the same cacheline. */
 static struct {
         u64 prev_time;
-        raw_spinlock_t lock;
+        arch_spinlock_t lock;
 } trace_clock_struct ____cacheline_aligned_in_smp =
         {
-                .lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED,
+                .lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
         };
 
 u64 notrace trace_clock_global(void)
@@ -81,7 +84,7 @@ u64 notrace trace_clock_global(void)
         int this_cpu;
         u64 now;
 
-        raw_local_irq_save(flags);
+        local_irq_save(flags);
 
         this_cpu = raw_smp_processor_id();
         now = cpu_clock(this_cpu);
@@ -92,7 +95,7 @@ u64 notrace trace_clock_global(void)
         if (unlikely(in_nmi()))
                 goto out;
 
-        __raw_spin_lock(&trace_clock_struct.lock);
+        arch_spin_lock(&trace_clock_struct.lock);
 
         /*
          * TODO: if this happens often then maybe we should reset
@@ -104,10 +107,10 @@ u64 notrace trace_clock_global(void)
 
         trace_clock_struct.prev_time = now;
 
-        __raw_spin_unlock(&trace_clock_struct.lock);
+        arch_spin_unlock(&trace_clock_struct.lock);
 
  out:
-        raw_local_irq_restore(flags);
+        local_irq_restore(flags);
 
         return now;
 }