author    Alok Kataria <akataria@vmware.com>    2008-07-01 14:43:31 -0400
committer Ingo Molnar <mingo@elte.hu>    2008-07-09 01:43:26 -0400
commit    2dbe06faf37b39f9ecffc054dd173b2a1dc2adcd (patch)
tree      530104ee7b810420983bcb2674724721ced76d0a /arch/x86/kernel/tsc_64.c
parent    bfc0f5947afa5e3a13e55867f4478c8a92c11dca (diff)
x86: merge the TSC cpu-freq code
Unify the TSC cpufreq code.

Signed-off-by: Alok N Kataria <akataria@vmware.com>
Signed-off-by: Dan Hecht <dhecht@vmware.com>
Cc: Dan Hecht <dhecht@vmware.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/tsc_64.c')
-rw-r--r--  arch/x86/kernel/tsc_64.c  114
1 file changed, 0 insertions, 114 deletions
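For readers skimming the removed hunk below: the cyc2ns comment in it describes converting TSC cycles to nanoseconds with scaled integer math, so the division by cpu_khz becomes a multiply plus a shift. The following is a minimal, standalone C sketch of that arithmetic only, not the kernel code; the cyc2ns()/CYC2NS_SHIFT names are hypothetical, and the shift of 10 is an assumption taken from the "10^6 * 2^10" bound mentioned in the removed comment.

/*
 * Sketch of: ns = cycles * ((10^6 << SHIFT) / cpu_khz) >> SHIFT
 * The kernel keeps one such scale per CPU (the cyc2ns per-cpu variable);
 * here a single scale is computed on the fly for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define CYC2NS_SHIFT 10  /* assumed scale factor, a power of two */

static uint64_t cyc2ns(uint64_t cycles, unsigned long cpu_khz)
{
        /* 10^6 ns per ms: one cycle lasts 10^6 / cpu_khz nanoseconds */
        uint64_t scale = ((uint64_t)1000000 << CYC2NS_SHIFT) / cpu_khz;

        return (cycles * scale) >> CYC2NS_SHIFT;
}

int main(void)
{
        /* 3,000,000 cycles on a 3 GHz (3,000,000 kHz) CPU is roughly 1,000,000 ns */
        printf("%llu ns\n", (unsigned long long)cyc2ns(3000000ULL, 3000000UL));
        return 0;
}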
diff --git a/arch/x86/kernel/tsc_64.c b/arch/x86/kernel/tsc_64.c
index c852ff9bd5d4..80a274b018c2 100644
--- a/arch/x86/kernel/tsc_64.c
+++ b/arch/x86/kernel/tsc_64.c
@@ -16,120 +16,6 @@
 extern int tsc_unstable;
 extern int tsc_disabled;
 
-/* Accelerators for sched_clock()
- * convert from cycles(64bits) => nanoseconds (64bits)
- * basic equation:
- *      ns = cycles / (freq / ns_per_sec)
- *      ns = cycles * (ns_per_sec / freq)
- *      ns = cycles * (10^9 / (cpu_khz * 10^3))
- *      ns = cycles * (10^6 / cpu_khz)
- *
- * Then we use scaling math (suggested by george@mvista.com) to get:
- *      ns = cycles * (10^6 * SC / cpu_khz) / SC
- *      ns = cycles * cyc2ns_scale / SC
- *
- * And since SC is a constant power of two, we can convert the div
- * into a shift.
- *
- * We can use khz divisor instead of mhz to keep a better precision, since
- * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
- * (mathieu.desnoyers@polymtl.ca)
- *
- *          -johnstul@us.ibm.com "math is hard, lets go shopping!"
- */
-
-DEFINE_PER_CPU(unsigned long, cyc2ns);
-
-void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
-{
-        unsigned long long tsc_now, ns_now;
-        unsigned long flags, *scale;
-
-        local_irq_save(flags);
-        sched_clock_idle_sleep_event();
-
-        scale = &per_cpu(cyc2ns, cpu);
-
-        rdtscll(tsc_now);
-        ns_now = __cycles_2_ns(tsc_now);
-
-        if (cpu_khz)
-                *scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz;
-
-        sched_clock_idle_wakeup_event(0);
-        local_irq_restore(flags);
-}
-
-#ifdef CONFIG_CPU_FREQ
-
-/* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
- * changes.
- *
- * RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
- * not that important because current Opteron setups do not support
- * scaling on SMP anyroads.
- *
- * Should fix up last_tsc too. Currently gettimeofday in the
- * first tick after the change will be slightly wrong.
- */
-
-static unsigned int ref_freq;
-static unsigned long loops_per_jiffy_ref;
-static unsigned long tsc_khz_ref;
-
-static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
-                                 void *data)
-{
-        struct cpufreq_freqs *freq = data;
-        unsigned long *lpj, dummy;
-
-        if (cpu_has(&cpu_data(freq->cpu), X86_FEATURE_CONSTANT_TSC))
-                return 0;
-
-        lpj = &dummy;
-        if (!(freq->flags & CPUFREQ_CONST_LOOPS))
-#ifdef CONFIG_SMP
-                lpj = &cpu_data(freq->cpu).loops_per_jiffy;
-#else
-                lpj = &boot_cpu_data.loops_per_jiffy;
-#endif
-
-        if (!ref_freq) {
-                ref_freq = freq->old;
-                loops_per_jiffy_ref = *lpj;
-                tsc_khz_ref = tsc_khz;
-        }
-        if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
-            (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
-            (val == CPUFREQ_RESUMECHANGE)) {
-                *lpj =
-                cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
-
-                tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
-                if (!(freq->flags & CPUFREQ_CONST_LOOPS))
-                        mark_tsc_unstable("cpufreq changes");
-        }
-
-        set_cyc2ns_scale(tsc_khz_ref, freq->cpu);
-
-        return 0;
-}
-
-static struct notifier_block time_cpufreq_notifier_block = {
-        .notifier_call = time_cpufreq_notifier
-};
-
-static int __init cpufreq_tsc(void)
-{
-        cpufreq_register_notifier(&time_cpufreq_notifier_block,
-                                  CPUFREQ_TRANSITION_NOTIFIER);
-        return 0;
-}
-
-core_initcall(cpufreq_tsc);
-
-#endif
-
 /*
  * Make an educated guess if the TSC is trustworthy and synchronized
  * over all CPUs.