aboutsummaryrefslogtreecommitdiffstats
path: root/arch/i386/kernel/tsc.c
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2007-02-13 07:26:22 -0500
committerAndi Kleen <andi@basil.nowhere.org>2007-02-13 07:26:22 -0500
commitf9690982b8c2f9a2c65acdc113e758ec356676a3 (patch)
treee9f486276e878505b267a67fcfaa279b9539e54e /arch/i386/kernel/tsc.c
parent2ff2d3d74705d34ab71b21f54634fcf50d57bdd5 (diff)
[PATCH] i386: improve sched_clock() on i686
Clean up sched_clock() on i686: it will use the TSC if available and falls back to jiffies only if the user asked for it to be disabled via notsc or the CPU calibration code didn't figure out the right cpu_khz. This generally makes the scheduler timestamps more fine-grained, on all hardware. (the current scheduler is pretty resistant against asynchronous sched_clock() values on different CPUs, it will allow at most up to a jiffy of jitter.) Also simplify sched_clock()'s check for TSC availability: propagate the desire and ability to use the TSC into the tsc_disable flag, previously this flag only indicated whether the notsc option was passed. This makes the rare low-res sched_clock() codepath a single branch off a read-mostly flag. Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Andi Kleen <ak@suse.de>
Diffstat (limited to 'arch/i386/kernel/tsc.c')
-rw-r--r--arch/i386/kernel/tsc.c22
1 file changed, 14 insertions, 8 deletions
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
index 12fef14995a5..46f752a8bbf3 100644
--- a/arch/i386/kernel/tsc.c
+++ b/arch/i386/kernel/tsc.c
@@ -112,13 +112,10 @@ unsigned long long sched_clock(void)
112 return (*custom_sched_clock)(); 112 return (*custom_sched_clock)();
113 113
114 /* 114 /*
115 * in the NUMA case we dont use the TSC as they are not 115 * Fall back to jiffies if there's no TSC available:
116 * synchronized across all CPUs.
117 */ 116 */
118#ifndef CONFIG_NUMA 117 if (unlikely(tsc_disable))
119 if (!cpu_khz || check_tsc_unstable()) 118 /* No locking but a rare wrong value is not a big deal: */
120#endif
121 /* no locking but a rare wrong value is not a big deal */
122 return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ); 119 return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
123 120
124 /* read the Time Stamp Counter: */ 121 /* read the Time Stamp Counter: */
@@ -198,13 +195,13 @@ EXPORT_SYMBOL(recalibrate_cpu_khz);
198void __init tsc_init(void) 195void __init tsc_init(void)
199{ 196{
200 if (!cpu_has_tsc || tsc_disable) 197 if (!cpu_has_tsc || tsc_disable)
201 return; 198 goto out_no_tsc;
202 199
203 cpu_khz = calculate_cpu_khz(); 200 cpu_khz = calculate_cpu_khz();
204 tsc_khz = cpu_khz; 201 tsc_khz = cpu_khz;
205 202
206 if (!cpu_khz) 203 if (!cpu_khz)
207 return; 204 goto out_no_tsc;
208 205
209 printk("Detected %lu.%03lu MHz processor.\n", 206 printk("Detected %lu.%03lu MHz processor.\n",
210 (unsigned long)cpu_khz / 1000, 207 (unsigned long)cpu_khz / 1000,
@@ -212,6 +209,15 @@ void __init tsc_init(void)
212 209
213 set_cyc2ns_scale(cpu_khz); 210 set_cyc2ns_scale(cpu_khz);
214 use_tsc_delay(); 211 use_tsc_delay();
212 return;
213
214out_no_tsc:
215 /*
216 * Set the tsc_disable flag if there's no TSC support, this
217 * makes it a fast flag for the kernel to see whether it
218 * should be using the TSC.
219 */
220 tsc_disable = 1;
215} 221}
216 222
217#ifdef CONFIG_CPU_FREQ 223#ifdef CONFIG_CPU_FREQ