[cgit page header: about · summary · refs · log · tree · commit · diff · stats]
diff options (context / space / mode)

 arch/i386/kernel/tsc.c      | 50 +++++++++----------------------
 include/linux/clocksource.h |  2 --
 kernel/timer.c              |  2 --
 3 files changed, 17 insertions(+), 37 deletions(-)
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
index a37654991788..b4b2be21d1c7 100644
--- a/arch/i386/kernel/tsc.c
+++ b/arch/i386/kernel/tsc.c
@@ -60,12 +60,6 @@ static inline int check_tsc_unstable(void)
 	return tsc_unstable;
 }
 
-void mark_tsc_unstable(void)
-{
-	tsc_unstable = 1;
-}
-EXPORT_SYMBOL_GPL(mark_tsc_unstable);
-
 /* Accellerators for sched_clock()
  * convert from cycles(64bits) => nanoseconds (64bits)
  * basic equation:
@@ -295,7 +289,6 @@ core_initcall(cpufreq_tsc);
 /* clock source code */
 
 static unsigned long current_tsc_khz = 0;
-static int tsc_update_callback(void);
 
 static cycle_t read_tsc(void)
 {
@@ -313,37 +306,28 @@ static struct clocksource clocksource_tsc = {
 	.mask			= CLOCKSOURCE_MASK(64),
 	.mult			= 0, /* to be set */
 	.shift			= 22,
-	.update_callback	= tsc_update_callback,
 	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
 				  CLOCK_SOURCE_MUST_VERIFY,
 };
 
-static int tsc_update_callback(void)
+void mark_tsc_unstable(void)
 {
-	int change = 0;
-
-	/* check to see if we should switch to the safe clocksource: */
-	if (clocksource_tsc.rating != 0 && check_tsc_unstable()) {
-		clocksource_change_rating(&clocksource_tsc, 0);
-		change = 1;
-	}
-
-	/* only update if tsc_khz has changed: */
-	if (current_tsc_khz != tsc_khz) {
-		current_tsc_khz = tsc_khz;
-		clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
-						clocksource_tsc.shift);
-		change = 1;
+	if (!tsc_unstable) {
+		tsc_unstable = 1;
+		/* Can be called before registration */
+		if (clocksource_tsc.mult)
+			clocksource_change_rating(&clocksource_tsc, 0);
+		else
+			clocksource_tsc.rating = 0;
 	}
-
-	return change;
 }
+EXPORT_SYMBOL_GPL(mark_tsc_unstable);
 
 static int __init dmi_mark_tsc_unstable(struct dmi_system_id *d)
 {
 	printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
 		d->ident);
-	mark_tsc_unstable();
+	tsc_unstable = 1;
 	return 0;
 }
 
@@ -415,11 +399,12 @@ __cpuinit int unsynchronized_tsc(void)
 	 * Intel systems are normally all synchronized.
 	 * Exceptions must mark TSC as unstable:
 	 */
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-		return 0;
-
-	/* assume multi socket systems are not synchronized: */
-	return num_possible_cpus() > 1;
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
+		/* assume multi socket systems are not synchronized: */
+		if (num_possible_cpus() > 1)
+			tsc_unstable = 1;
+	}
+	return tsc_unstable;
 }
 
 static int __init init_tsc_clocksource(void)
@@ -429,8 +414,7 @@ static int __init init_tsc_clocksource(void)
 	/* check blacklist */
 	dmi_check_system(bad_tsc_dmi_table);
 
-	if (unsynchronized_tsc()) /* mark unstable if unsynced */
-		mark_tsc_unstable();
+	unsynchronized_tsc();
 	current_tsc_khz = tsc_khz;
 	clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
 					clocksource_tsc.shift);
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 45b0c310ae82..a585a29fe7c4 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -44,7 +44,6 @@ typedef u64 cycle_t;
  *	subtraction of non 64 bit counters
  * @mult:		cycle to nanosecond multiplier
  * @shift:		cycle to nanosecond divisor (power of two)
- * @update_callback:	called when safe to alter clocksource values
  * @flags:		flags describing special properties
  * @cycle_interval:	Used internally by timekeeping core, please ignore.
  * @xtime_interval:	Used internally by timekeeping core, please ignore.
@@ -57,7 +56,6 @@ struct clocksource {
 	cycle_t mask;
 	u32 mult;
 	u32 shift;
-	int (*update_callback)(void);
 	unsigned long flags;
 
 	/* timekeeping specific data, ignore */
diff --git a/kernel/timer.c b/kernel/timer.c
index 1144ff2c2ea3..4b088fcadb3f 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -848,8 +848,6 @@ static int change_clocksource(void)
 		printk(KERN_INFO "Time: %s clocksource has been installed.\n",
 		       clock->name);
 		return 1;
-	} else if (clock->update_callback) {
-		return clock->update_callback();
 	}
 	return 0;
 }