diff options
author | Guillaume Chazarain <guichaz@yahoo.fr> | 2008-01-30 07:30:06 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-01-30 07:30:06 -0500 |
commit | 53d517cdbaac704352b3d0c10fecb99e0b54572e (patch) | |
tree | 4056bc99a4e6077d7d968d30ea21895e425a83ef | |
parent | 83bd01024b1fdfc41d9b758e5669e80fca72df66 (diff) |
x86: scale cyc_2_nsec according to CPU frequency
Scale the sched_clock() cyc_2_nsec scaling factor according to
CPU frequency changes.
[ mingo@elte.hu: simplified it and fixed it for SMP. ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r-- | arch/x86/kernel/tsc_32.c | 43 | ||||
-rw-r--r-- | arch/x86/kernel/tsc_64.c | 57 | ||||
-rw-r--r-- | include/asm-x86/timer.h | 23 |
3 files changed, 102 insertions, 21 deletions
diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c index 9ebc0dab66b4..00bb4c1c0593 100644 --- a/arch/x86/kernel/tsc_32.c +++ b/arch/x86/kernel/tsc_32.c | |||
@@ -5,6 +5,7 @@ | |||
5 | #include <linux/jiffies.h> | 5 | #include <linux/jiffies.h> |
6 | #include <linux/init.h> | 6 | #include <linux/init.h> |
7 | #include <linux/dmi.h> | 7 | #include <linux/dmi.h> |
8 | #include <linux/percpu.h> | ||
8 | 9 | ||
9 | #include <asm/delay.h> | 10 | #include <asm/delay.h> |
10 | #include <asm/tsc.h> | 11 | #include <asm/tsc.h> |
@@ -80,13 +81,31 @@ EXPORT_SYMBOL_GPL(check_tsc_unstable); | |||
80 | * | 81 | * |
81 | * -johnstul@us.ibm.com "math is hard, lets go shopping!" | 82 | * -johnstul@us.ibm.com "math is hard, lets go shopping!" |
82 | */ | 83 | */ |
83 | unsigned long cyc2ns_scale __read_mostly; | ||
84 | 84 | ||
85 | #define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ | 85 | DEFINE_PER_CPU(unsigned long, cyc2ns); |
86 | 86 | ||
87 | static inline void set_cyc2ns_scale(unsigned long cpu_khz) | 87 | static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) |
88 | { | 88 | { |
89 | cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz; | 89 | unsigned long flags, prev_scale, *scale; |
90 | unsigned long long tsc_now, ns_now; | ||
91 | |||
92 | local_irq_save(flags); | ||
93 | sched_clock_idle_sleep_event(); | ||
94 | |||
95 | scale = &per_cpu(cyc2ns, cpu); | ||
96 | |||
97 | rdtscll(tsc_now); | ||
98 | ns_now = __cycles_2_ns(tsc_now); | ||
99 | |||
100 | prev_scale = *scale; | ||
101 | if (cpu_khz) | ||
102 | *scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz; | ||
103 | |||
104 | /* | ||
105 | * Start smoothly with the new frequency: | ||
106 | */ | ||
107 | sched_clock_idle_wakeup_event(0); | ||
108 | local_irq_restore(flags); | ||
90 | } | 109 | } |
91 | 110 | ||
92 | /* | 111 | /* |
@@ -239,7 +258,9 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data) | |||
239 | ref_freq, freq->new); | 258 | ref_freq, freq->new); |
240 | if (!(freq->flags & CPUFREQ_CONST_LOOPS)) { | 259 | if (!(freq->flags & CPUFREQ_CONST_LOOPS)) { |
241 | tsc_khz = cpu_khz; | 260 | tsc_khz = cpu_khz; |
242 | set_cyc2ns_scale(cpu_khz); | 261 | preempt_disable(); |
262 | set_cyc2ns_scale(cpu_khz, smp_processor_id()); | ||
263 | preempt_enable(); | ||
243 | /* | 264 | /* |
244 | * TSC based sched_clock turns | 265 | * TSC based sched_clock turns |
245 | * to junk w/ cpufreq | 266 | * to junk w/ cpufreq |
@@ -367,6 +388,8 @@ static inline void check_geode_tsc_reliable(void) { } | |||
367 | 388 | ||
368 | void __init tsc_init(void) | 389 | void __init tsc_init(void) |
369 | { | 390 | { |
391 | int cpu; | ||
392 | |||
370 | if (!cpu_has_tsc || tsc_disable) | 393 | if (!cpu_has_tsc || tsc_disable) |
371 | goto out_no_tsc; | 394 | goto out_no_tsc; |
372 | 395 | ||
@@ -380,7 +403,15 @@ void __init tsc_init(void) | |||
380 | (unsigned long)cpu_khz / 1000, | 403 | (unsigned long)cpu_khz / 1000, |
381 | (unsigned long)cpu_khz % 1000); | 404 | (unsigned long)cpu_khz % 1000); |
382 | 405 | ||
383 | set_cyc2ns_scale(cpu_khz); | 406 | /* |
407 | * Secondary CPUs do not run through tsc_init(), so set up | ||
408 | * all the scale factors for all CPUs, assuming the same | ||
409 | * speed as the bootup CPU. (cpufreq notifiers will fix this | ||
410 | * up if their speed diverges) | ||
411 | */ | ||
412 | for_each_possible_cpu(cpu) | ||
413 | set_cyc2ns_scale(cpu_khz, cpu); | ||
414 | |||
384 | use_tsc_delay(); | 415 | use_tsc_delay(); |
385 | 416 | ||
386 | /* Check and install the TSC clocksource */ | 417 | /* Check and install the TSC clocksource */ |
diff --git a/arch/x86/kernel/tsc_64.c b/arch/x86/kernel/tsc_64.c index 9c70af45b42b..32edd2c50e94 100644 --- a/arch/x86/kernel/tsc_64.c +++ b/arch/x86/kernel/tsc_64.c | |||
@@ -10,6 +10,7 @@ | |||
10 | 10 | ||
11 | #include <asm/hpet.h> | 11 | #include <asm/hpet.h> |
12 | #include <asm/timex.h> | 12 | #include <asm/timex.h> |
13 | #include <asm/timer.h> | ||
13 | 14 | ||
14 | static int notsc __initdata = 0; | 15 | static int notsc __initdata = 0; |
15 | 16 | ||
@@ -18,16 +19,48 @@ EXPORT_SYMBOL(cpu_khz); | |||
18 | unsigned int tsc_khz; | 19 | unsigned int tsc_khz; |
19 | EXPORT_SYMBOL(tsc_khz); | 20 | EXPORT_SYMBOL(tsc_khz); |
20 | 21 | ||
21 | static unsigned int cyc2ns_scale __read_mostly; | 22 | /* Accelerators for sched_clock() |
23 | * convert from cycles(64bits) => nanoseconds (64bits) | ||
24 | * basic equation: | ||
25 | * ns = cycles / (freq / ns_per_sec) | ||
26 | * ns = cycles * (ns_per_sec / freq) | ||
27 | * ns = cycles * (10^9 / (cpu_khz * 10^3)) | ||
28 | * ns = cycles * (10^6 / cpu_khz) | ||
29 | * | ||
30 | * Then we use scaling math (suggested by george@mvista.com) to get: | ||
31 | * ns = cycles * (10^6 * SC / cpu_khz) / SC | ||
32 | * ns = cycles * cyc2ns_scale / SC | ||
33 | * | ||
34 | * And since SC is a constant power of two, we can convert the div | ||
35 | * into a shift. | ||
36 | * | ||
37 | * We can use khz divisor instead of mhz to keep a better precision, since | ||
38 | * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits. | ||
39 | * (mathieu.desnoyers@polymtl.ca) | ||
40 | * | ||
41 | * -johnstul@us.ibm.com "math is hard, lets go shopping!" | ||
42 | */ | ||
43 | DEFINE_PER_CPU(unsigned long, cyc2ns); | ||
22 | 44 | ||
23 | static inline void set_cyc2ns_scale(unsigned long khz) | 45 | static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) |
24 | { | 46 | { |
25 | cyc2ns_scale = (NSEC_PER_MSEC << NS_SCALE) / khz; | 47 | unsigned long flags, prev_scale, *scale; |
26 | } | 48 | unsigned long long tsc_now, ns_now; |
27 | 49 | ||
28 | static unsigned long long cycles_2_ns(unsigned long long cyc) | 50 | local_irq_save(flags); |
29 | { | 51 | sched_clock_idle_sleep_event(); |
30 | return (cyc * cyc2ns_scale) >> NS_SCALE; | 52 | |
53 | scale = &per_cpu(cyc2ns, cpu); | ||
54 | |||
55 | rdtscll(tsc_now); | ||
56 | ns_now = __cycles_2_ns(tsc_now); | ||
57 | |||
58 | prev_scale = *scale; | ||
59 | if (cpu_khz) | ||
60 | *scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz; | ||
61 | |||
62 | sched_clock_idle_wakeup_event(0); | ||
63 | local_irq_restore(flags); | ||
31 | } | 64 | } |
32 | 65 | ||
33 | unsigned long long sched_clock(void) | 66 | unsigned long long sched_clock(void) |
@@ -100,7 +133,9 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, | |||
100 | mark_tsc_unstable("cpufreq changes"); | 133 | mark_tsc_unstable("cpufreq changes"); |
101 | } | 134 | } |
102 | 135 | ||
103 | set_cyc2ns_scale(tsc_khz_ref); | 136 | preempt_disable(); |
137 | set_cyc2ns_scale(tsc_khz_ref, smp_processor_id()); | ||
138 | preempt_enable(); | ||
104 | 139 | ||
105 | return 0; | 140 | return 0; |
106 | } | 141 | } |
@@ -151,7 +186,7 @@ static unsigned long __init tsc_read_refs(unsigned long *pm, | |||
151 | void __init tsc_calibrate(void) | 186 | void __init tsc_calibrate(void) |
152 | { | 187 | { |
153 | unsigned long flags, tsc1, tsc2, tr1, tr2, pm1, pm2, hpet1, hpet2; | 188 | unsigned long flags, tsc1, tsc2, tr1, tr2, pm1, pm2, hpet1, hpet2; |
154 | int hpet = is_hpet_enabled(); | 189 | int hpet = is_hpet_enabled(), cpu; |
155 | 190 | ||
156 | local_irq_save(flags); | 191 | local_irq_save(flags); |
157 | 192 | ||
@@ -206,7 +241,9 @@ void __init tsc_calibrate(void) | |||
206 | } | 241 | } |
207 | 242 | ||
208 | tsc_khz = tsc2 / tsc1; | 243 | tsc_khz = tsc2 / tsc1; |
209 | set_cyc2ns_scale(tsc_khz); | 244 | |
245 | for_each_possible_cpu(cpu) | ||
246 | set_cyc2ns_scale(tsc_khz, cpu); | ||
210 | } | 247 | } |
211 | 248 | ||
212 | /* | 249 | /* |
diff --git a/include/asm-x86/timer.h b/include/asm-x86/timer.h index 0db7e994fb8b..4f6fcb050c11 100644 --- a/include/asm-x86/timer.h +++ b/include/asm-x86/timer.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define _ASMi386_TIMER_H | 2 | #define _ASMi386_TIMER_H |
3 | #include <linux/init.h> | 3 | #include <linux/init.h> |
4 | #include <linux/pm.h> | 4 | #include <linux/pm.h> |
5 | #include <linux/percpu.h> | ||
5 | 6 | ||
6 | #define TICK_SIZE (tick_nsec / 1000) | 7 | #define TICK_SIZE (tick_nsec / 1000) |
7 | 8 | ||
@@ -16,7 +17,7 @@ extern int recalibrate_cpu_khz(void); | |||
16 | #define calculate_cpu_khz() native_calculate_cpu_khz() | 17 | #define calculate_cpu_khz() native_calculate_cpu_khz() |
17 | #endif | 18 | #endif |
18 | 19 | ||
19 | /* Accellerators for sched_clock() | 20 | /* Accelerators for sched_clock() |
20 | * convert from cycles(64bits) => nanoseconds (64bits) | 21 | * convert from cycles(64bits) => nanoseconds (64bits) |
21 | * basic equation: | 22 | * basic equation: |
22 | * ns = cycles / (freq / ns_per_sec) | 23 | * ns = cycles / (freq / ns_per_sec) |
@@ -31,20 +32,32 @@ extern int recalibrate_cpu_khz(void); | |||
31 | * And since SC is a constant power of two, we can convert the div | 32 | * And since SC is a constant power of two, we can convert the div |
32 | * into a shift. | 33 | * into a shift. |
33 | * | 34 | * |
34 | * We can use khz divisor instead of mhz to keep a better percision, since | 35 | * We can use khz divisor instead of mhz to keep a better precision, since |
35 | * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits. | 36 | * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits. |
36 | * (mathieu.desnoyers@polymtl.ca) | 37 | * (mathieu.desnoyers@polymtl.ca) |
37 | * | 38 | * |
38 | * -johnstul@us.ibm.com "math is hard, lets go shopping!" | 39 | * -johnstul@us.ibm.com "math is hard, lets go shopping!" |
39 | */ | 40 | */ |
40 | extern unsigned long cyc2ns_scale __read_mostly; | 41 | |
42 | DECLARE_PER_CPU(unsigned long, cyc2ns); | ||
41 | 43 | ||
42 | #define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ | 44 | #define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ |
43 | 45 | ||
44 | static inline unsigned long long cycles_2_ns(unsigned long long cyc) | 46 | static inline unsigned long long __cycles_2_ns(unsigned long long cyc) |
45 | { | 47 | { |
46 | return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR; | 48 | return cyc * per_cpu(cyc2ns, smp_processor_id()) >> CYC2NS_SCALE_FACTOR; |
47 | } | 49 | } |
48 | 50 | ||
51 | static inline unsigned long long cycles_2_ns(unsigned long long cyc) | ||
52 | { | ||
53 | unsigned long long ns; | ||
54 | unsigned long flags; | ||
55 | |||
56 | local_irq_save(flags); | ||
57 | ns = __cycles_2_ns(cyc); | ||
58 | local_irq_restore(flags); | ||
59 | |||
60 | return ns; | ||
61 | } | ||
49 | 62 | ||
50 | #endif | 63 | #endif |