author    Patrick Ohly <patrick.ohly@intel.com>    2009-02-12 00:03:34 -0500
committer David S. Miller <davem@davemloft.net>    2009-02-16 01:43:31 -0500
commit    a038a353c3de4040d8445ec568acebdac144436f (patch)
tree      2fac74d812b47fa2d9fabfa55261d544813b66f6
parent    0a834a36ac92375cd82d9e4fe4f571e257997d6a (diff)
clocksource: allow usage independent of timekeeping.c
So far struct clocksource acted as the interface between time/timekeeping.c
and hardware. This patch generalizes the concept so that a similar interface
can also be used in other contexts. For that it introduces new structures and
related functions *without* touching the existing struct clocksource.

The reasons for adding these new structures to clocksource.[ch] are
* the APIs are clearly related
* struct clocksource could be cleaned up to use the new structs
* avoids proliferation of files with similar names (timesource.h?
  timecounter.h?)

As outlined in the discussion with John Stultz, this patch adds
* struct cyclecounter: stateless API to hardware which counts clock cycles
* struct timecounter: stateful utility code built on a cyclecounter which
  provides a nanosecond counter
* only the function to read the nanosecond counter; deltas are used
  internally and not exposed to users of timecounter

The code does no locking of the shared state. It must be called at least as
often as the cycle counter wraps around to detect these wrap arounds. Both
are the responsibility of the timecounter user.

Acked-by: John Stultz <johnstul@us.ibm.com>
Signed-off-by: Patrick Ohly <patrick.ohly@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
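As a rough sketch of how a driver might consume the new API (all foo_* names,
the 32-bit counter width and the 125 MHz clock rate are hypothetical; only the
cyclecounter/timecounter structures and functions come from this patch):

#include <linux/clocksource.h>
#include <linux/io.h>
#include <linux/ktime.h>

static struct {
	void __iomem *counter_reg;	/* mapped elsewhere by the driver */
} foo_hw;

/* stateless read callback for the free-running hardware counter */
static cycle_t foo_read_cycles(const struct cyclecounter *cc)
{
	return (cycle_t)readl(foo_hw.counter_reg);
}

static struct cyclecounter foo_cc = {
	.read	= foo_read_cycles,
	.mask	= CLOCKSOURCE_MASK(32),	/* counter is 32 bits wide */
	.mult	= 8,			/* 125 MHz -> 8 ns per cycle */
	.shift	= 0,
};

static struct timecounter foo_tc;

static void foo_time_init(void)
{
	/* anchor the nanosecond count to the current wall-clock time */
	timecounter_init(&foo_tc, &foo_cc, ktime_to_ns(ktime_get_real()));
}

/* convert a raw hardware timestamp (cycle value) to nanoseconds */
static u64 foo_hwtstamp_to_ns(cycle_t hw_tstamp)
{
	return timecounter_cyc2time(&foo_tc, hw_tstamp);
}

/* must run more often than the 32-bit counter wraps (~34 s at 125 MHz);
 * any locking against concurrent readers is the driver's job */
static void foo_poll(void)
{
	timecounter_read(&foo_tc);
}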
-rw-r--r--   include/linux/clocksource.h   101
-rw-r--r--   kernel/time/clocksource.c      76
2 files changed, 177 insertions(+), 0 deletions(-)
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index f88d32f8ff7c..573819ef4cc0 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -22,8 +22,109 @@ typedef u64 cycle_t;
 struct clocksource;
 
 /**
+ * struct cyclecounter - hardware abstraction for a free running counter
+ *	Provides completely state-free accessors to the underlying hardware.
+ *	Depending on which hardware it reads, the cycle counter may wrap
+ *	around quickly. Locking rules (if necessary) have to be defined
+ *	by the implementor and user of specific instances of this API.
+ *
+ * @read:		returns the current cycle value
+ * @mask:		bitmask for two's complement
+ *			subtraction of non 64 bit counters,
+ *			see CLOCKSOURCE_MASK() helper macro
+ * @mult:		cycle to nanosecond multiplier
+ * @shift:		cycle to nanosecond divisor (power of two)
+ */
+struct cyclecounter {
+	cycle_t (*read)(const struct cyclecounter *cc);
+	cycle_t mask;
+	u32 mult;
+	u32 shift;
+};
+
+/**
+ * struct timecounter - layer above a %struct cyclecounter which counts nanoseconds
+ *	Contains the state needed by timecounter_read() to detect
+ *	cycle counter wrap around. Initialize with
+ *	timecounter_init(). Also used to convert cycle counts into the
+ *	corresponding nanosecond counts with timecounter_cyc2time(). Users
+ *	of this code are responsible for initializing the underlying
+ *	cycle counter hardware, locking issues and reading the time
+ *	more often than the cycle counter wraps around. The nanosecond
+ *	counter will only wrap around after ~585 years.
+ *
+ * @cc:			the cycle counter used by this instance
+ * @cycle_last:		most recent cycle counter value seen by
+ *			timecounter_read()
+ * @nsec:		continuously increasing count
+ */
+struct timecounter {
+	const struct cyclecounter *cc;
+	cycle_t cycle_last;
+	u64 nsec;
+};
+
+/**
+ * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
+ * @cc:		Pointer to cycle counter.
+ * @cycles:	Cycles
+ *
+ * XXX - This could use some mult_lxl_ll() asm optimization. Same code
+ * as in cyc2ns, but with unsigned result.
+ */
+static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
+				      cycle_t cycles)
+{
+	u64 ret = (u64)cycles;
+	ret = (ret * cc->mult) >> cc->shift;
+	return ret;
+}
+
+/**
+ * timecounter_init - initialize a time counter
+ * @tc:			Pointer to time counter which is to be initialized/reset
+ * @cc:			A cycle counter, ready to be used.
+ * @start_tstamp:	Arbitrary initial time stamp.
+ *
+ * After this call the current cycle register (roughly) corresponds to
+ * the initial time stamp. Every call to timecounter_read() increments
+ * the time stamp counter by the number of elapsed nanoseconds.
+ */
+extern void timecounter_init(struct timecounter *tc,
+			     const struct cyclecounter *cc,
+			     u64 start_tstamp);
+
+/**
+ * timecounter_read - return nanoseconds elapsed since timecounter_init()
+ *			plus the initial time stamp
+ * @tc:		Pointer to time counter.
+ *
+ * In other words, keeps track of time since the same epoch as
+ * the function which generated the initial time stamp.
+ */
+extern u64 timecounter_read(struct timecounter *tc);
+
+/**
+ * timecounter_cyc2time - convert a cycle counter to same
+ *			time base as values returned by
+ *			timecounter_read()
+ * @tc:			Pointer to time counter.
+ * @cycle_tstamp:	a value returned by tc->cc->read()
+ *
+ * Cycle counts are converted correctly as long as they
+ * fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
+ * with "max cycle count" == cc->mask+1.
+ *
+ * This allows conversion of cycle counter values which were generated
+ * in the past.
+ */
+extern u64 timecounter_cyc2time(struct timecounter *tc,
+				cycle_t cycle_tstamp);
+
+/**
  * struct clocksource - hardware abstraction for a free running counter
  *	Provides mostly state-free accessors to the underlying hardware.
+ *	This is the structure used for system time.
  *
  * @name:		ptr to clocksource name
  * @list:		list head for registration
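The @mult/@shift pair documented above encodes the cycle-to-nanosecond ratio
as a fixed-point fraction. A minimal sketch of how a driver could derive @mult
for its counter frequency; the helper name is hypothetical and not part of
this patch, only div_u64()/NSEC_PER_SEC are existing kernel facilities:

#include <linux/math64.h>
#include <linux/time.h>

/*
 * ns = (cycles * mult) >> shift, so mult = (NSEC_PER_SEC << shift) / freq.
 * A larger shift gives finer sub-nanosecond resolution but overflows the
 * 64-bit intermediate product in cyclecounter_cyc2ns() sooner.
 */
static u32 foo_cc_calc_mult(u32 freq_hz, u32 shift)
{
	return (u32)div_u64((u64)NSEC_PER_SEC << shift, freq_hz);
}

/* e.g. a 250 MHz counter with shift = 10: mult = (1e9 << 10) / 250e6 = 4096,
 * and cyclecounter_cyc2ns() then yields exactly 4 ns per cycle. */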
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index ca89e1593f08..c46c931a7fe7 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -31,6 +31,82 @@
 #include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
 #include <linux/tick.h>
 
+void timecounter_init(struct timecounter *tc,
+		      const struct cyclecounter *cc,
+		      u64 start_tstamp)
+{
+	tc->cc = cc;
+	tc->cycle_last = cc->read(cc);
+	tc->nsec = start_tstamp;
+}
+EXPORT_SYMBOL(timecounter_init);
+
+/**
+ * timecounter_read_delta - get nanoseconds since last call of this function
+ * @tc:		Pointer to time counter
+ *
+ * When the underlying cycle counter runs over, this will be handled
+ * correctly as long as it does not run over more than once between
+ * calls.
+ *
+ * The first call to this function for a new time counter initializes
+ * the time tracking and returns an undefined result.
+ */
+static u64 timecounter_read_delta(struct timecounter *tc)
+{
+	cycle_t cycle_now, cycle_delta;
+	u64 ns_offset;
+
+	/* read cycle counter: */
+	cycle_now = tc->cc->read(tc->cc);
+
+	/* calculate the delta since the last timecounter_read_delta(): */
+	cycle_delta = (cycle_now - tc->cycle_last) & tc->cc->mask;
+
+	/* convert to nanoseconds: */
+	ns_offset = cyclecounter_cyc2ns(tc->cc, cycle_delta);
+
+	/* update time stamp of timecounter_read_delta() call: */
+	tc->cycle_last = cycle_now;
+
+	return ns_offset;
+}
+
+u64 timecounter_read(struct timecounter *tc)
+{
+	u64 nsec;
+
+	/* increment time by nanoseconds since last call */
+	nsec = timecounter_read_delta(tc);
+	nsec += tc->nsec;
+	tc->nsec = nsec;
+
+	return nsec;
+}
+EXPORT_SYMBOL(timecounter_read);
+
+u64 timecounter_cyc2time(struct timecounter *tc,
+			 cycle_t cycle_tstamp)
+{
+	u64 cycle_delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
+	u64 nsec;
+
+	/*
+	 * Instead of always treating cycle_tstamp as more recent
+	 * than tc->cycle_last, detect when it is too far in the
+	 * future and treat it as old time stamp instead.
+	 */
+	if (cycle_delta > tc->cc->mask / 2) {
+		cycle_delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;
+		nsec = tc->nsec - cyclecounter_cyc2ns(tc->cc, cycle_delta);
+	} else {
+		nsec = cyclecounter_cyc2ns(tc->cc, cycle_delta) + tc->nsec;
+	}
+
+	return nsec;
+}
+EXPORT_SYMBOL(timecounter_cyc2time);
+
 /* XXX - Would like a better way for initializing curr_clocksource */
 extern struct clocksource clocksource_jiffies;
 
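To make the half-range test in timecounter_cyc2time() concrete, here is a
small userspace sketch (not part of the patch) that mirrors the same
arithmetic with a 16-bit counter and 1 ns per cycle, and shows that a
timestamp taken just before the counter wrapped is still converted correctly:

#include <stdint.h>
#include <stdio.h>

struct tc {
	uint64_t mask;		/* 16-bit counter: 0xffff */
	uint64_t cycle_last;	/* last value seen by the periodic read */
	uint64_t nsec;		/* nanosecond count at cycle_last */
};

/* same half-range decision as timecounter_cyc2time() */
static uint64_t cyc2time(const struct tc *tc, uint64_t cycle_tstamp)
{
	uint64_t delta = (cycle_tstamp - tc->cycle_last) & tc->mask;

	if (delta > tc->mask / 2) {
		/* timestamp lies in the past: subtract the distance */
		delta = (tc->cycle_last - cycle_tstamp) & tc->mask;
		return tc->nsec - delta;
	}
	/* timestamp lies at or after cycle_last: add the distance */
	return tc->nsec + delta;
}

int main(void)
{
	/* the counter wrapped recently; the last read saw cycle 0x0010 */
	struct tc tc = { .mask = 0xffff, .cycle_last = 0x0010, .nsec = 1000000 };

	/* timestamp taken just before the wrap, at cycle 0xfff0 */
	printf("%llu\n", (unsigned long long)cyc2time(&tc, 0xfff0));
	/* prints 999968: 32 cycles (= 32 ns) before cycle_last */

	/* timestamp taken shortly after the last read, at cycle 0x0020 */
	printf("%llu\n", (unsigned long long)cyc2time(&tc, 0x0020));
	/* prints 1000016: 16 cycles after cycle_last */
	return 0;
}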