Diffstat (limited to 'arch/x86/include/asm/timer.h')
-rw-r--r--  arch/x86/include/asm/timer.h  78
1 file changed, 18 insertions, 60 deletions
diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h
index 34baa0eb5d0c..a04eabd43d06 100644
--- a/arch/x86/include/asm/timer.h
+++ b/arch/x86/include/asm/timer.h
@@ -1,9 +1,9 @@
 #ifndef _ASM_X86_TIMER_H
 #define _ASM_X86_TIMER_H
-#include <linux/init.h>
 #include <linux/pm.h>
 #include <linux/percpu.h>
 #include <linux/interrupt.h>
+#include <linux/math64.h>
 
 #define TICK_SIZE (tick_nsec / 1000)
 
@@ -12,68 +12,26 @@ extern int recalibrate_cpu_khz(void);
 
 extern int no_timer_check;
 
-/* Accelerators for sched_clock()
- * convert from cycles(64bits) => nanoseconds (64bits)
- * basic equation:
- *              ns = cycles / (freq / ns_per_sec)
- *              ns = cycles * (ns_per_sec / freq)
- *              ns = cycles * (10^9 / (cpu_khz * 10^3))
- *              ns = cycles * (10^6 / cpu_khz)
+/*
+ * We use the full linear equation: f(x) = a + b*x, in order to allow
+ * a continuous function in the face of dynamic freq changes.
  *
- *      Then we use scaling math (suggested by george@mvista.com) to get:
- *              ns = cycles * (10^6 * SC / cpu_khz) / SC
- *              ns = cycles * cyc2ns_scale / SC
+ * Continuity means that when our frequency changes our slope (b); we want to
+ * ensure that: f(t) == f'(t), which gives: a + b*t == a' + b'*t.
  *
- *      And since SC is a constant power of two, we can convert the div
- *  into a shift.
+ * Without an offset (a) the above would not be possible.
  *
- *  We can use khz divisor instead of mhz to keep a better precision, since
- *  cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
- *  (mathieu.desnoyers@polymtl.ca)
- *
- *                      -johnstul@us.ibm.com "math is hard, lets go shopping!"
- *
- * In:
- *
- * ns = cycles * cyc2ns_scale / SC
- *
- * Although we may still have enough bits to store the value of ns,
- * in some cases, we may not have enough bits to store cycles * cyc2ns_scale,
- * leading to an incorrect result.
- *
- * To avoid this, we can decompose 'cycles' into quotient and remainder
- * of division by SC.  Then,
- *
- * ns = (quot * SC + rem) * cyc2ns_scale / SC
- *    = quot * cyc2ns_scale + (rem * cyc2ns_scale) / SC
- *
- *                      - sqazi@google.com
+ * See the comment near cycles_2_ns() for details on how we compute (b).
  */
-
-DECLARE_PER_CPU(unsigned long, cyc2ns);
-DECLARE_PER_CPU(unsigned long long, cyc2ns_offset);
-
-#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
-
-static inline unsigned long long __cycles_2_ns(unsigned long long cyc)
-{
-        int cpu = smp_processor_id();
-        unsigned long long ns = per_cpu(cyc2ns_offset, cpu);
-        ns += mult_frac(cyc, per_cpu(cyc2ns, cpu),
-                        (1UL << CYC2NS_SCALE_FACTOR));
-        return ns;
-}
-
-static inline unsigned long long cycles_2_ns(unsigned long long cyc)
-{
-        unsigned long long ns;
-        unsigned long flags;
-
-        local_irq_save(flags);
-        ns = __cycles_2_ns(cyc);
-        local_irq_restore(flags);
-
-        return ns;
-}
+struct cyc2ns_data {
+        u32 cyc2ns_mul;
+        u32 cyc2ns_shift;
+        u64 cyc2ns_offset;
+        u32 __count;
+        /* u32 hole */
+}; /* 24 bytes -- do not grow */
+
+extern struct cyc2ns_data *cyc2ns_read_begin(void);
+extern void cyc2ns_read_end(struct cyc2ns_data *);
 
 #endif /* _ASM_X86_TIMER_H */
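
A note on the removed derivation: the old comment reduces the conversion to
ns = cycles * (10^6 * SC / cpu_khz) / SC, with SC a power of two so the
division becomes a shift, and splits 'cycles' into quotient and remainder of
division by SC so that the intermediate product cannot overflow 64 bits
(which is what mult_frac() does). A minimal standalone C sketch of that math;
illustrative only, cycles_to_ns() is a hypothetical name, not kernel code:

#include <stdint.h>

#define CYC2NS_SCALE_FACTOR 10 /* SC = 2^10, as in the removed code */

/* ns = cycles * (10^6 * SC / cpu_khz) / SC, evaluated as
 * quot * scale + (rem * scale) / SC so rem * scale stays small. */
static uint64_t cycles_to_ns(uint64_t cycles, uint64_t cpu_khz)
{
        uint64_t scale = (1000000ULL << CYC2NS_SCALE_FACTOR) / cpu_khz;
        uint64_t quot  = cycles >> CYC2NS_SCALE_FACTOR;
        uint64_t rem   = cycles & ((1ULL << CYC2NS_SCALE_FACTOR) - 1);

        return quot * scale + ((rem * scale) >> CYC2NS_SCALE_FACTOR);
}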
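
The continuity requirement in the new comment also pins down the update rule
at a frequency change: solving a + b*t == a' + b'*t for the new offset gives
a' = a + (b - b')*t; equivalently, the new offset is the clock value at the
switch point minus the new slope's contribution. A sketch of that update
under stated assumptions: struct lin_clock and mul_shift() are hypothetical
stand-ins, and the kernel's real computation of (b) lives near cycles_2_ns(),
as the comment above says.

#include <stdint.h>

struct lin_clock {
        uint64_t offset;        /* a */
        uint32_t mul;           /* b, expressed as a (mul, shift) pair */
        uint32_t shift;
};

/* b*x as a widening multiply plus shift, like cyc2ns_mul/cyc2ns_shift */
static uint64_t mul_shift(uint64_t x, uint32_t mul, uint32_t shift)
{
        return (uint64_t)(((unsigned __int128)x * mul) >> shift);
}

/* At cycle count t, switch slopes without letting f(t) jump:
 * a' = f(t) - b'*t, so that a' + b'*t == a + b*t. */
static void change_slope(struct lin_clock *c, uint64_t t,
                         uint32_t new_mul, uint32_t new_shift)
{
        uint64_t ns_now = c->offset + mul_shift(t, c->mul, c->shift);

        c->mul    = new_mul;
        c->shift  = new_shift;
        c->offset = ns_now - mul_shift(t, new_mul, new_shift);
}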
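
On the read side, the new accessor pair brackets each conversion. The actual
consumer is not part of this hunk, so the following is only a plausible
sketch, assuming mul_u64_u32_shr() from the newly included <linux/math64.h>:

static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
        struct cyc2ns_data *data = cyc2ns_read_begin();
        unsigned long long ns;

        /* f(cyc) = a + b*cyc, with b applied as a mul/shift */
        ns  = data->cyc2ns_offset;
        ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);

        cyc2ns_read_end(data);  /* presumably drops the __count reference */

        return ns;
}

Note that unlike the removed cycles_2_ns(), nothing here needs the
local_irq_save()/local_irq_restore() pair around the conversion; the
begin/end accessors take its place, which is presumably the point of moving
the data behind cyc2ns_read_begin()/cyc2ns_read_end().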