author    Alok Kataria <akataria@vmware.com>    2008-07-01 14:43:31 -0400
committer Ingo Molnar <mingo@elte.hu>           2008-07-09 01:43:26 -0400
commit    2dbe06faf37b39f9ecffc054dd173b2a1dc2adcd (patch)
tree      530104ee7b810420983bcb2674724721ced76d0a
parent    bfc0f5947afa5e3a13e55867f4478c8a92c11dca (diff)
x86: merge the TSC cpu-freq code
Unify the TSC cpufreq code.

Signed-off-by: Alok N Kataria <akataria@vmware.com>
Signed-off-by: Dan Hecht <dhecht@vmware.com>
Cc: Dan Hecht <dhecht@vmware.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--  arch/x86/kernel/tsc.c     | 114
-rw-r--r--  arch/x86/kernel/tsc_32.c  | 113
-rw-r--r--  arch/x86/kernel/tsc_64.c  | 114
3 files changed, 114 insertions(+), 227 deletions(-)
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index e6ee14533c75..595f78a22212 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -4,6 +4,7 @@
 #include <linux/module.h>
 #include <linux/timer.h>
 #include <linux/acpi_pmtmr.h>
+#include <linux/cpufreq.h>
 
 #include <asm/hpet.h>
 
@@ -215,3 +216,116 @@ int recalibrate_cpu_khz(void)
 EXPORT_SYMBOL(recalibrate_cpu_khz);
 
 #endif /* CONFIG_X86_32 */
+
+/* Accelerators for sched_clock()
+ * convert from cycles(64bits) => nanoseconds (64bits)
+ * basic equation:
+ *		ns = cycles / (freq / ns_per_sec)
+ *		ns = cycles * (ns_per_sec / freq)
+ *		ns = cycles * (10^9 / (cpu_khz * 10^3))
+ *		ns = cycles * (10^6 / cpu_khz)
+ *
+ *	Then we use scaling math (suggested by george@mvista.com) to get:
+ *		ns = cycles * (10^6 * SC / cpu_khz) / SC
+ *		ns = cycles * cyc2ns_scale / SC
+ *
+ *	And since SC is a constant power of two, we can convert the div
+ *	into a shift.
+ *
+ *	We can use khz divisor instead of mhz to keep a better precision, since
+ *	cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
+ *	(mathieu.desnoyers@polymtl.ca)
+ *
+ *			-johnstul@us.ibm.com "math is hard, lets go shopping!"
+ */
+
+DEFINE_PER_CPU(unsigned long, cyc2ns);
+
+void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
+{
+	unsigned long long tsc_now, ns_now;
+	unsigned long flags, *scale;
+
+	local_irq_save(flags);
+	sched_clock_idle_sleep_event();
+
+	scale = &per_cpu(cyc2ns, cpu);
+
+	rdtscll(tsc_now);
+	ns_now = __cycles_2_ns(tsc_now);
+
+	if (cpu_khz)
+		*scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz;
+
+	sched_clock_idle_wakeup_event(0);
+	local_irq_restore(flags);
+}
+
+#ifdef CONFIG_CPU_FREQ
+
+/* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
+ * changes.
+ *
+ * RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
+ * not that important because current Opteron setups do not support
+ * scaling on SMP anyroads.
+ *
+ * Should fix up last_tsc too. Currently gettimeofday in the
+ * first tick after the change will be slightly wrong.
+ */
+
+static unsigned int ref_freq;
+static unsigned long loops_per_jiffy_ref;
+static unsigned long tsc_khz_ref;
+
+static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
+				void *data)
+{
+	struct cpufreq_freqs *freq = data;
+	unsigned long *lpj, dummy;
+
+	if (cpu_has(&cpu_data(freq->cpu), X86_FEATURE_CONSTANT_TSC))
+		return 0;
+
+	lpj = &dummy;
+	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
+#ifdef CONFIG_SMP
+		lpj = &cpu_data(freq->cpu).loops_per_jiffy;
+#else
+		lpj = &boot_cpu_data.loops_per_jiffy;
+#endif
+
+	if (!ref_freq) {
+		ref_freq = freq->old;
+		loops_per_jiffy_ref = *lpj;
+		tsc_khz_ref = tsc_khz;
+	}
+	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
+			(val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
+			(val == CPUFREQ_RESUMECHANGE)) {
+		*lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
+
+		tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
+		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
+			mark_tsc_unstable("cpufreq changes");
+	}
+
+	set_cyc2ns_scale(tsc_khz_ref, freq->cpu);
+
+	return 0;
+}
+
+static struct notifier_block time_cpufreq_notifier_block = {
+	.notifier_call = time_cpufreq_notifier
+};
+
+static int __init cpufreq_tsc(void)
+{
+	cpufreq_register_notifier(&time_cpufreq_notifier_block,
+				CPUFREQ_TRANSITION_NOTIFIER);
+	return 0;
+}
+
+core_initcall(cpufreq_tsc);
+
+#endif /* CONFIG_CPU_FREQ */
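
The comment block in the hunk above boils down to precomputing
cyc2ns_scale = (10^6 << CYC2NS_SCALE_FACTOR) / cpu_khz once per frequency
change, after which every sched_clock() conversion needs only a multiply and
a shift. A minimal userspace sketch of that fixed-point math, assuming
CYC2NS_SCALE_FACTOR is 10 as in the x86 headers of this period; the main()
driver and its 2 GHz sample value are illustrative only:

#include <stdio.h>

#define NSEC_PER_MSEC		1000000UL
#define CYC2NS_SCALE_FACTOR	10	/* SC = 2^10, so the div becomes a shift */

/* ns = cycles * cyc2ns_scale / SC, with the division folded into a shift */
static unsigned long long cycles_2_ns(unsigned long long cyc, unsigned long scale)
{
	return (cyc * scale) >> CYC2NS_SCALE_FACTOR;
}

int main(void)
{
	unsigned long cpu_khz = 2000000;	/* a 2 GHz CPU */
	unsigned long scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR) / cpu_khz;

	/* 2*10^9 cycles at 2 GHz is one second: prints scale=512 ns=1000000000 */
	printf("scale=%lu ns=%llu\n", scale, cycles_2_ns(2000000000ULL, scale));
	return 0;
}

At 2 GHz the precomputed scale comes out to 512, i.e. 512/1024 = 0.5 ns per
cycle after the shift, which is exactly the clock period.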
diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c
index 40c0aafb358d..bbc153d36f84 100644
--- a/arch/x86/kernel/tsc_32.c
+++ b/arch/x86/kernel/tsc_32.c
@@ -18,119 +18,6 @@
 extern int tsc_unstable;
 extern int tsc_disabled;
 
-/* Accelerators for sched_clock()
- * convert from cycles(64bits) => nanoseconds (64bits)
- * basic equation:
- *		ns = cycles / (freq / ns_per_sec)
- *		ns = cycles * (ns_per_sec / freq)
- *		ns = cycles * (10^9 / (cpu_khz * 10^3))
- *		ns = cycles * (10^6 / cpu_khz)
- *
- *	Then we use scaling math (suggested by george@mvista.com) to get:
- *		ns = cycles * (10^6 * SC / cpu_khz) / SC
- *		ns = cycles * cyc2ns_scale / SC
- *
- *	And since SC is a constant power of two, we can convert the div
- *	into a shift.
- *
- *	We can use khz divisor instead of mhz to keep a better precision, since
- *	cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
- *	(mathieu.desnoyers@polymtl.ca)
- *
- *			-johnstul@us.ibm.com "math is hard, lets go shopping!"
- */
-
-DEFINE_PER_CPU(unsigned long, cyc2ns);
-
-void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
-{
-	unsigned long long tsc_now, ns_now;
-	unsigned long flags, *scale;
-
-	local_irq_save(flags);
-	sched_clock_idle_sleep_event();
-
-	scale = &per_cpu(cyc2ns, cpu);
-
-	rdtscll(tsc_now);
-	ns_now = __cycles_2_ns(tsc_now);
-
-	if (cpu_khz)
-		*scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz;
-
-	/*
-	 * Start smoothly with the new frequency:
-	 */
-	sched_clock_idle_wakeup_event(0);
-	local_irq_restore(flags);
-}
-
-#ifdef CONFIG_CPU_FREQ
-
-/*
- * if the CPU frequency is scaled, TSC-based delays will need a different
- * loops_per_jiffy value to function properly.
- */
-static unsigned int ref_freq;
-static unsigned long loops_per_jiffy_ref;
-static unsigned long cpu_khz_ref;
-
-static int
-time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
-{
-	struct cpufreq_freqs *freq = data;
-
-	if (!ref_freq) {
-		if (!freq->old){
-			ref_freq = freq->new;
-			return 0;
-		}
-		ref_freq = freq->old;
-		loops_per_jiffy_ref = cpu_data(freq->cpu).loops_per_jiffy;
-		cpu_khz_ref = cpu_khz;
-	}
-
-	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
-	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
-	    (val == CPUFREQ_RESUMECHANGE)) {
-		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
-			cpu_data(freq->cpu).loops_per_jiffy =
-				cpufreq_scale(loops_per_jiffy_ref,
-						ref_freq, freq->new);
-
-		if (cpu_khz) {
-
-			if (num_online_cpus() == 1)
-				cpu_khz = cpufreq_scale(cpu_khz_ref,
-						ref_freq, freq->new);
-			if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
-				tsc_khz = cpu_khz;
-				set_cyc2ns_scale(cpu_khz, freq->cpu);
-				/*
-				 * TSC based sched_clock turns
-				 * to junk w/ cpufreq
-				 */
-				mark_tsc_unstable("cpufreq changes");
-			}
-		}
-	}
-
-	return 0;
-}
-
-static struct notifier_block time_cpufreq_notifier_block = {
-	.notifier_call	= time_cpufreq_notifier
-};
-
-static int __init cpufreq_tsc(void)
-{
-	return cpufreq_register_notifier(&time_cpufreq_notifier_block,
-					 CPUFREQ_TRANSITION_NOTIFIER);
-}
-core_initcall(cpufreq_tsc);
-
-#endif
-
 /* clock source code */
 
 static struct clocksource clocksource_tsc;
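
Both the unified notifier and this removed 32-bit original lean on
cpufreq_scale() to rescale values sampled at the reference frequency. A toy
stand-in for that helper, under the assumption that it computes the plain
proportion new = old * mult / div (the in-kernel helper also takes care to
avoid overflow); the loops_per_jiffy and frequency figures below are made up
for illustration:

#include <stdio.h>

/* scale a value sampled at 'div' kHz to its equivalent at 'mult' kHz */
static unsigned long cpufreq_scale(unsigned long old, unsigned int div,
				   unsigned int mult)
{
	return (unsigned long)(((unsigned long long)old * mult) / div);
}

int main(void)
{
	unsigned long loops_per_jiffy_ref = 9973760;	/* sampled at ref_freq */
	unsigned int ref_freq = 2000000;		/* 2 GHz, in kHz */

	/* dropping to 1 GHz halves loops_per_jiffy, and tsc_khz with it */
	printf("lpj at 1 GHz: %lu\n",
	       cpufreq_scale(loops_per_jiffy_ref, ref_freq, 1000000));
	return 0;
}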
diff --git a/arch/x86/kernel/tsc_64.c b/arch/x86/kernel/tsc_64.c
index c852ff9bd5d4..80a274b018c2 100644
--- a/arch/x86/kernel/tsc_64.c
+++ b/arch/x86/kernel/tsc_64.c
@@ -16,120 +16,6 @@
 extern int tsc_unstable;
 extern int tsc_disabled;
 
-/* Accelerators for sched_clock()
- * convert from cycles(64bits) => nanoseconds (64bits)
- * basic equation:
- *		ns = cycles / (freq / ns_per_sec)
- *		ns = cycles * (ns_per_sec / freq)
- *		ns = cycles * (10^9 / (cpu_khz * 10^3))
- *		ns = cycles * (10^6 / cpu_khz)
- *
- *	Then we use scaling math (suggested by george@mvista.com) to get:
- *		ns = cycles * (10^6 * SC / cpu_khz) / SC
- *		ns = cycles * cyc2ns_scale / SC
- *
- *	And since SC is a constant power of two, we can convert the div
- *	into a shift.
- *
- *	We can use khz divisor instead of mhz to keep a better precision, since
- *	cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
- *	(mathieu.desnoyers@polymtl.ca)
- *
- *			-johnstul@us.ibm.com "math is hard, lets go shopping!"
- */
-
-DEFINE_PER_CPU(unsigned long, cyc2ns);
-
-void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
-{
-	unsigned long long tsc_now, ns_now;
-	unsigned long flags, *scale;
-
-	local_irq_save(flags);
-	sched_clock_idle_sleep_event();
-
-	scale = &per_cpu(cyc2ns, cpu);
-
-	rdtscll(tsc_now);
-	ns_now = __cycles_2_ns(tsc_now);
-
-	if (cpu_khz)
-		*scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz;
-
-	sched_clock_idle_wakeup_event(0);
-	local_irq_restore(flags);
-}
-
-#ifdef CONFIG_CPU_FREQ
-
-/* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
- * changes.
- *
- * RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
- * not that important because current Opteron setups do not support
- * scaling on SMP anyroads.
- *
- * Should fix up last_tsc too. Currently gettimeofday in the
- * first tick after the change will be slightly wrong.
- */
-
-static unsigned int ref_freq;
-static unsigned long loops_per_jiffy_ref;
-static unsigned long tsc_khz_ref;
-
-static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
-				 void *data)
-{
-	struct cpufreq_freqs *freq = data;
-	unsigned long *lpj, dummy;
-
-	if (cpu_has(&cpu_data(freq->cpu), X86_FEATURE_CONSTANT_TSC))
-		return 0;
-
-	lpj = &dummy;
-	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
-#ifdef CONFIG_SMP
-		lpj = &cpu_data(freq->cpu).loops_per_jiffy;
-#else
-		lpj = &boot_cpu_data.loops_per_jiffy;
-#endif
-
-	if (!ref_freq) {
-		ref_freq = freq->old;
-		loops_per_jiffy_ref = *lpj;
-		tsc_khz_ref = tsc_khz;
-	}
-	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
-			(val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
-			(val == CPUFREQ_RESUMECHANGE)) {
-		*lpj =
-		cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
-
-		tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
-		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
-			mark_tsc_unstable("cpufreq changes");
-	}
-
-	set_cyc2ns_scale(tsc_khz_ref, freq->cpu);
-
-	return 0;
-}
-
-static struct notifier_block time_cpufreq_notifier_block = {
-	.notifier_call = time_cpufreq_notifier
-};
-
-static int __init cpufreq_tsc(void)
-{
-	cpufreq_register_notifier(&time_cpufreq_notifier_block,
-				CPUFREQ_TRANSITION_NOTIFIER);
-	return 0;
-}
-
-core_initcall(cpufreq_tsc);
-
-#endif
-
 /*
  * Make an educated guess if the TSC is trustworthy and synchronized
  * over all CPUs.
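
One subtlety shared by all three copies of the notifier is the guard around
the rescaling: it fires on PRECHANGE only when the frequency is rising and on
POSTCHANGE only when it is falling. The apparent intent is that
loops_per_jiffy is updated before a speed-up and after a slow-down, so the
calibrated value is never too small during the transition and TSC-driven
delay loops can only run long, never short. A standalone restatement of that
condition (the enum mirrors the CPUFREQ_* transition values; this is an
illustration, not kernel code):

#include <stdbool.h>
#include <stdio.h>

enum phase { PRECHANGE, POSTCHANGE, RESUMECHANGE };

/* rescale before the CPU gets faster and after it gets slower, keeping
 * the calibrated values pessimistic for the whole transition window */
static bool should_rescale(enum phase val, unsigned int old_khz,
			   unsigned int new_khz)
{
	return (val == PRECHANGE && old_khz < new_khz) ||
	       (val == POSTCHANGE && old_khz > new_khz) ||
	       (val == RESUMECHANGE);
}

int main(void)
{
	/* speeding up: act early (prints 1); slowing down: not yet (prints 0) */
	printf("%d %d\n", should_rescale(PRECHANGE, 1000000, 2000000),
			  should_rescale(PRECHANGE, 2000000, 1000000));
	return 0;
}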