about summary refs log tree commit diff stats
path: root/arch/x86/kernel
diff options
context:
space:
mode:
author    Guillaume Chazarain <guichaz@yahoo.fr>  2008-01-30 07:30:06 -0500
committer Ingo Molnar <mingo@elte.hu>  2008-01-30 07:30:06 -0500
commit 53d517cdbaac704352b3d0c10fecb99e0b54572e (patch)
tree   4056bc99a4e6077d7d968d30ea21895e425a83ef /arch/x86/kernel
parent 83bd01024b1fdfc41d9b758e5669e80fca72df66 (diff)
x86: scale cyc_2_nsec according to CPU frequency
scale the sched_clock() cyc_2_nsec scaling factor according to CPU frequency changes. [ mingo@elte.hu: simplified it and fixed it for SMP. ] Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/tsc_32.c  43
-rw-r--r--  arch/x86/kernel/tsc_64.c  57
2 files changed, 84 insertions(+), 16 deletions(-)
diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c
index 9ebc0dab66b4..00bb4c1c0593 100644
--- a/arch/x86/kernel/tsc_32.c
+++ b/arch/x86/kernel/tsc_32.c
@@ -5,6 +5,7 @@
5#include <linux/jiffies.h> 5#include <linux/jiffies.h>
6#include <linux/init.h> 6#include <linux/init.h>
7#include <linux/dmi.h> 7#include <linux/dmi.h>
8#include <linux/percpu.h>
8 9
9#include <asm/delay.h> 10#include <asm/delay.h>
10#include <asm/tsc.h> 11#include <asm/tsc.h>
@@ -80,13 +81,31 @@ EXPORT_SYMBOL_GPL(check_tsc_unstable);
80 * 81 *
81 * -johnstul@us.ibm.com "math is hard, lets go shopping!" 82 * -johnstul@us.ibm.com "math is hard, lets go shopping!"
82 */ 83 */
83unsigned long cyc2ns_scale __read_mostly;
84 84
85#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ 85DEFINE_PER_CPU(unsigned long, cyc2ns);
86 86
87static inline void set_cyc2ns_scale(unsigned long cpu_khz) 87static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
88{ 88{
89 cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz; 89 unsigned long flags, prev_scale, *scale;
90 unsigned long long tsc_now, ns_now;
91
92 local_irq_save(flags);
93 sched_clock_idle_sleep_event();
94
95 scale = &per_cpu(cyc2ns, cpu);
96
97 rdtscll(tsc_now);
98 ns_now = __cycles_2_ns(tsc_now);
99
100 prev_scale = *scale;
101 if (cpu_khz)
102 *scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz;
103
104 /*
105 * Start smoothly with the new frequency:
106 */
107 sched_clock_idle_wakeup_event(0);
108 local_irq_restore(flags);
90} 109}
91 110
92/* 111/*
@@ -239,7 +258,9 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
239 ref_freq, freq->new); 258 ref_freq, freq->new);
240 if (!(freq->flags & CPUFREQ_CONST_LOOPS)) { 259 if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
241 tsc_khz = cpu_khz; 260 tsc_khz = cpu_khz;
242 set_cyc2ns_scale(cpu_khz); 261 preempt_disable();
262 set_cyc2ns_scale(cpu_khz, smp_processor_id());
263 preempt_enable();
243 /* 264 /*
244 * TSC based sched_clock turns 265 * TSC based sched_clock turns
245 * to junk w/ cpufreq 266 * to junk w/ cpufreq
@@ -367,6 +388,8 @@ static inline void check_geode_tsc_reliable(void) { }
367 388
368void __init tsc_init(void) 389void __init tsc_init(void)
369{ 390{
391 int cpu;
392
370 if (!cpu_has_tsc || tsc_disable) 393 if (!cpu_has_tsc || tsc_disable)
371 goto out_no_tsc; 394 goto out_no_tsc;
372 395
@@ -380,7 +403,15 @@ void __init tsc_init(void)
380 (unsigned long)cpu_khz / 1000, 403 (unsigned long)cpu_khz / 1000,
381 (unsigned long)cpu_khz % 1000); 404 (unsigned long)cpu_khz % 1000);
382 405
383 set_cyc2ns_scale(cpu_khz); 406 /*
407 * Secondary CPUs do not run through tsc_init(), so set up
408 * all the scale factors for all CPUs, assuming the same
409 * speed as the bootup CPU. (cpufreq notifiers will fix this
410 * up if their speed diverges)
411 */
412 for_each_possible_cpu(cpu)
413 set_cyc2ns_scale(cpu_khz, cpu);
414
384 use_tsc_delay(); 415 use_tsc_delay();
385 416
386 /* Check and install the TSC clocksource */ 417 /* Check and install the TSC clocksource */
diff --git a/arch/x86/kernel/tsc_64.c b/arch/x86/kernel/tsc_64.c
index 9c70af45b42b..32edd2c50e94 100644
--- a/arch/x86/kernel/tsc_64.c
+++ b/arch/x86/kernel/tsc_64.c
@@ -10,6 +10,7 @@
10 10
11#include <asm/hpet.h> 11#include <asm/hpet.h>
12#include <asm/timex.h> 12#include <asm/timex.h>
13#include <asm/timer.h>
13 14
14static int notsc __initdata = 0; 15static int notsc __initdata = 0;
15 16
@@ -18,16 +19,48 @@ EXPORT_SYMBOL(cpu_khz);
18unsigned int tsc_khz; 19unsigned int tsc_khz;
19EXPORT_SYMBOL(tsc_khz); 20EXPORT_SYMBOL(tsc_khz);
20 21
21static unsigned int cyc2ns_scale __read_mostly; 22/* Accelerators for sched_clock()
23 * convert from cycles(64bits) => nanoseconds (64bits)
24 * basic equation:
25 * ns = cycles / (freq / ns_per_sec)
26 * ns = cycles * (ns_per_sec / freq)
27 * ns = cycles * (10^9 / (cpu_khz * 10^3))
28 * ns = cycles * (10^6 / cpu_khz)
29 *
30 * Then we use scaling math (suggested by george@mvista.com) to get:
31 * ns = cycles * (10^6 * SC / cpu_khz) / SC
32 * ns = cycles * cyc2ns_scale / SC
33 *
34 * And since SC is a constant power of two, we can convert the div
35 * into a shift.
36 *
37 * We can use khz divisor instead of mhz to keep a better precision, since
38 * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
39 * (mathieu.desnoyers@polymtl.ca)
40 *
41 * -johnstul@us.ibm.com "math is hard, lets go shopping!"
42 */
43DEFINE_PER_CPU(unsigned long, cyc2ns);
22 44
23static inline void set_cyc2ns_scale(unsigned long khz) 45static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
24{ 46{
25 cyc2ns_scale = (NSEC_PER_MSEC << NS_SCALE) / khz; 47 unsigned long flags, prev_scale, *scale;
26} 48 unsigned long long tsc_now, ns_now;
27 49
28static unsigned long long cycles_2_ns(unsigned long long cyc) 50 local_irq_save(flags);
29{ 51 sched_clock_idle_sleep_event();
30 return (cyc * cyc2ns_scale) >> NS_SCALE; 52
53 scale = &per_cpu(cyc2ns, cpu);
54
55 rdtscll(tsc_now);
56 ns_now = __cycles_2_ns(tsc_now);
57
58 prev_scale = *scale;
59 if (cpu_khz)
60 *scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz;
61
62 sched_clock_idle_wakeup_event(0);
63 local_irq_restore(flags);
31} 64}
32 65
33unsigned long long sched_clock(void) 66unsigned long long sched_clock(void)
@@ -100,7 +133,9 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
100 mark_tsc_unstable("cpufreq changes"); 133 mark_tsc_unstable("cpufreq changes");
101 } 134 }
102 135
103 set_cyc2ns_scale(tsc_khz_ref); 136 preempt_disable();
137 set_cyc2ns_scale(tsc_khz_ref, smp_processor_id());
138 preempt_enable();
104 139
105 return 0; 140 return 0;
106} 141}
@@ -151,7 +186,7 @@ static unsigned long __init tsc_read_refs(unsigned long *pm,
151void __init tsc_calibrate(void) 186void __init tsc_calibrate(void)
152{ 187{
153 unsigned long flags, tsc1, tsc2, tr1, tr2, pm1, pm2, hpet1, hpet2; 188 unsigned long flags, tsc1, tsc2, tr1, tr2, pm1, pm2, hpet1, hpet2;
154 int hpet = is_hpet_enabled(); 189 int hpet = is_hpet_enabled(), cpu;
155 190
156 local_irq_save(flags); 191 local_irq_save(flags);
157 192
@@ -206,7 +241,9 @@ void __init tsc_calibrate(void)
206 } 241 }
207 242
208 tsc_khz = tsc2 / tsc1; 243 tsc_khz = tsc2 / tsc1;
209 set_cyc2ns_scale(tsc_khz); 244
245 for_each_possible_cpu(cpu)
246 set_cyc2ns_scale(tsc_khz, cpu);
210} 247}
211 248
212/* 249/*