author     Linus Torvalds <torvalds@linux-foundation.org>  2009-04-05 14:04:19 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-04-05 14:04:19 -0400
commit     714f83d5d9f7c785f622259dad1f4fad12d64664 (patch)
tree       20563541ae438e11d686b4d629074eb002a481b7 /kernel/trace/trace_clock.c
parent     8901e7ffc2fa78ede7ce9826dbad68a3a25dc2dc (diff)
parent     645dae969c3b8651c5bc7c54a1835ec03820f85f (diff)
Merge branch 'tracing-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'tracing-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (413 commits)
  tracing, net: fix net tree and tracing tree merge interaction
  tracing, powerpc: fix powerpc tree and tracing tree interaction
  ring-buffer: do not remove reader page from list on ring buffer free
  function-graph: allow unregistering twice
  trace: make argument 'mem' of trace_seq_putmem() const
  tracing: add missing 'extern' keywords to trace_output.h
  tracing: provide trace_seq_reserve()
  blktrace: print out BLK_TN_MESSAGE properly
  blktrace: extract duplidate code
  blktrace: fix memory leak when freeing struct blk_io_trace
  blktrace: fix blk_probes_ref chaos
  blktrace: make classic output more classic
  blktrace: fix off-by-one bug
  blktrace: fix the original blktrace
  blktrace: fix a race when creating blk_tree_root in debugfs
  blktrace: fix timestamp in binary output
  tracing, Text Edit Lock: cleanup
  tracing: filter fix for TRACE_EVENT_FORMAT events
  ftrace: Using FTRACE_WARN_ON() to check "freed record" in ftrace_release()
  x86: kretprobe-booster interrupt emulation code fix
  ...

Fix up trivial conflicts in arch/parisc/include/asm/ftrace.h include/linux/memory.h kernel/extable.c kernel/module.c
Diffstat (limited to 'kernel/trace/trace_clock.c')
 -rw-r--r--  kernel/trace/trace_clock.c  109
 1 file changed, 109 insertions(+), 0 deletions(-)
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
new file mode 100644
index 000000000000..b588fd81f7f9
--- /dev/null
+++ b/kernel/trace/trace_clock.c
@@ -0,0 +1,109 @@
/*
 * tracing clocks
 *
 * Copyright (C) 2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Implements 3 trace clock variants, with differing scalability/precision
 * tradeoffs:
 *
 *  - local: CPU-local trace clock
 *  - medium: scalable global clock with some jitter
 *  - global: globally monotonic, serialized clock
 *
 * Tracer plugins will choose a default from these clocks.
 */
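/*
 * In this file: trace_clock_local() implements the 'local' variant,
 * trace_clock() the 'medium' one, and trace_clock_global() the
 * 'global' one.
 */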
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/ktime.h>
#include <linux/trace_clock.h>

/*
 * trace_clock_local(): the simplest and least coherent tracing clock.
 *
 * Useful for tracing that does not cross to other CPUs and does not
 * span CPU idle events.
 */
u64 notrace trace_clock_local(void)
{
        unsigned long flags;
        u64 clock;

        /*
         * sched_clock() is an architecture-implemented, fast, scalable,
         * lockless clock. It is not guaranteed to be coherent across
         * CPUs, nor across CPU idle events. Interrupts are disabled
         * around the read so the timestamp is taken atomically on
         * this CPU.
         */
        raw_local_irq_save(flags);
        clock = sched_clock();
        raw_local_irq_restore(flags);

        return clock;
}

/*
 * trace_clock(): 'in-between' trace clock. Not completely serialized,
 * but not completely incorrect when crossing CPUs either.
 *
 * This is based on cpu_clock(), which will allow at most ~1 jiffy of
 * jitter between CPUs. So it's a pretty scalable clock, but there
 * can be offsets in the trace data.
 */
u64 notrace trace_clock(void)
{
        return cpu_clock(raw_smp_processor_id());
}

/*
 * trace_clock_global(): special globally coherent trace clock
 *
 * It has higher overhead than the other trace clocks but is still
 * an order of magnitude faster than GTOD-derived hardware clocks.
 *
 * Used by plugins that need globally coherent timestamps.
 */

/* Last timestamp handed out; protected by trace_clock_lock below. */
static u64 prev_trace_clock_time;

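/*
 * Cacheline-align the lock on SMP so the contended line it occupies
 * is not falsely shared with unrelated data; every non-NMI call to
 * trace_clock_global() takes this lock.
 */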
static raw_spinlock_t trace_clock_lock ____cacheline_aligned_in_smp =
        (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

u64 notrace trace_clock_global(void)
{
        unsigned long flags;
        int this_cpu;
        u64 now;

        raw_local_irq_save(flags);

        this_cpu = raw_smp_processor_id();
        now = cpu_clock(this_cpu);
        /*
         * If in an NMI context then don't risk lockups and return the
         * cpu_clock() time:
         */
        if (unlikely(in_nmi()))
                goto out;

        __raw_spin_lock(&trace_clock_lock);

        /*
         * TODO: if this happens often then maybe we should reset
         * my_scd->clock to prev_trace_clock_time+1, to make sure
         * we start ticking with the local clock from now on?
         */
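        /*
         * Keep the global clock monotonic: the (s64) cast makes the
         * comparison wraparound-safe, and if this CPU's clock is
         * behind the last timestamp handed out, return that
         * timestamp + 1 rather than going backwards.
         */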
        if ((s64)(now - prev_trace_clock_time) < 0)
                now = prev_trace_clock_time + 1;

        prev_trace_clock_time = now;

        __raw_spin_unlock(&trace_clock_lock);

 out:
        raw_local_irq_restore(flags);

        return now;
}
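
For illustration (not part of this commit): a minimal sketch of how a tracer
plugin might stamp events with these clocks, assuming only the three exported
clocks above plus raw_smp_processor_id(). The struct and helper names
(demo_event, demo_stamp) are hypothetical, not kernel API.

#include <linux/smp.h>
#include <linux/trace_clock.h>
#include <linux/types.h>

/* Hypothetical event record, for illustration only. */
struct demo_event {
        u64     ts;     /* timestamp from one of the trace clocks */
        int     cpu;    /* CPU the event was recorded on */
};

/*
 * Hypothetical helper: pay for the serialized trace_clock_global()
 * only when events must be ordered across CPUs; otherwise use the
 * cheap, CPU-local trace_clock_local().
 */
static void demo_stamp(struct demo_event *ev, bool need_global)
{
        ev->ts  = need_global ? trace_clock_global() : trace_clock_local();
        ev->cpu = raw_smp_processor_id();
}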