diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2007-02-16 04:27:42 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-02-16 11:13:57 -0500 |
commit | 7e69f2b1ead2a4c51c12817f18263ff0e59335a6 (patch) | |
tree | b00295fb1983dd0624b94a613c8661c1bd995a29 /arch/i386/kernel/tsc.c | |
parent | 877fe38029366c19def24359627db8cc24d9fef6 (diff) |
[PATCH] clocksource: Remove the update callback
The clocksource code allows direct updates of the rating of a given
clocksource now. Change TSC unstable tracking to use this interface and
remove the update callback.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: john stultz <johnstul@us.ibm.com>
Cc: Roman Zippel <zippel@linux-m68k.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/i386/kernel/tsc.c')
-rw-r--r-- | arch/i386/kernel/tsc.c | 50 |
1 file changed, 17 insertions, 33 deletions
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c index a37654991788..b4b2be21d1c7 100644 --- a/arch/i386/kernel/tsc.c +++ b/arch/i386/kernel/tsc.c | |||
@@ -60,12 +60,6 @@ static inline int check_tsc_unstable(void) | |||
60 | return tsc_unstable; | 60 | return tsc_unstable; |
61 | } | 61 | } |
62 | 62 | ||
63 | void mark_tsc_unstable(void) | ||
64 | { | ||
65 | tsc_unstable = 1; | ||
66 | } | ||
67 | EXPORT_SYMBOL_GPL(mark_tsc_unstable); | ||
68 | |||
69 | /* Accellerators for sched_clock() | 63 | /* Accellerators for sched_clock() |
70 | * convert from cycles(64bits) => nanoseconds (64bits) | 64 | * convert from cycles(64bits) => nanoseconds (64bits) |
71 | * basic equation: | 65 | * basic equation: |
@@ -295,7 +289,6 @@ core_initcall(cpufreq_tsc); | |||
295 | /* clock source code */ | 289 | /* clock source code */ |
296 | 290 | ||
297 | static unsigned long current_tsc_khz = 0; | 291 | static unsigned long current_tsc_khz = 0; |
298 | static int tsc_update_callback(void); | ||
299 | 292 | ||
300 | static cycle_t read_tsc(void) | 293 | static cycle_t read_tsc(void) |
301 | { | 294 | { |
@@ -313,37 +306,28 @@ static struct clocksource clocksource_tsc = { | |||
313 | .mask = CLOCKSOURCE_MASK(64), | 306 | .mask = CLOCKSOURCE_MASK(64), |
314 | .mult = 0, /* to be set */ | 307 | .mult = 0, /* to be set */ |
315 | .shift = 22, | 308 | .shift = 22, |
316 | .update_callback = tsc_update_callback, | ||
317 | .flags = CLOCK_SOURCE_IS_CONTINUOUS | | 309 | .flags = CLOCK_SOURCE_IS_CONTINUOUS | |
318 | CLOCK_SOURCE_MUST_VERIFY, | 310 | CLOCK_SOURCE_MUST_VERIFY, |
319 | }; | 311 | }; |
320 | 312 | ||
321 | static int tsc_update_callback(void) | 313 | void mark_tsc_unstable(void) |
322 | { | 314 | { |
323 | int change = 0; | 315 | if (!tsc_unstable) { |
324 | 316 | tsc_unstable = 1; | |
325 | /* check to see if we should switch to the safe clocksource: */ | 317 | /* Can be called before registration */ |
326 | if (clocksource_tsc.rating != 0 && check_tsc_unstable()) { | 318 | if (clocksource_tsc.mult) |
327 | clocksource_change_rating(&clocksource_tsc, 0); | 319 | clocksource_change_rating(&clocksource_tsc, 0); |
328 | change = 1; | 320 | else |
329 | } | 321 | clocksource_tsc.rating = 0; |
330 | |||
331 | /* only update if tsc_khz has changed: */ | ||
332 | if (current_tsc_khz != tsc_khz) { | ||
333 | current_tsc_khz = tsc_khz; | ||
334 | clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz, | ||
335 | clocksource_tsc.shift); | ||
336 | change = 1; | ||
337 | } | 322 | } |
338 | |||
339 | return change; | ||
340 | } | 323 | } |
324 | EXPORT_SYMBOL_GPL(mark_tsc_unstable); | ||
341 | 325 | ||
342 | static int __init dmi_mark_tsc_unstable(struct dmi_system_id *d) | 326 | static int __init dmi_mark_tsc_unstable(struct dmi_system_id *d) |
343 | { | 327 | { |
344 | printk(KERN_NOTICE "%s detected: marking TSC unstable.\n", | 328 | printk(KERN_NOTICE "%s detected: marking TSC unstable.\n", |
345 | d->ident); | 329 | d->ident); |
346 | mark_tsc_unstable(); | 330 | tsc_unstable = 1; |
347 | return 0; | 331 | return 0; |
348 | } | 332 | } |
349 | 333 | ||
@@ -415,11 +399,12 @@ __cpuinit int unsynchronized_tsc(void) | |||
415 | * Intel systems are normally all synchronized. | 399 | * Intel systems are normally all synchronized. |
416 | * Exceptions must mark TSC as unstable: | 400 | * Exceptions must mark TSC as unstable: |
417 | */ | 401 | */ |
418 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) | 402 | if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) { |
419 | return 0; | 403 | /* assume multi socket systems are not synchronized: */ |
420 | 404 | if (num_possible_cpus() > 1) | |
421 | /* assume multi socket systems are not synchronized: */ | 405 | tsc_unstable = 1; |
422 | return num_possible_cpus() > 1; | 406 | } |
407 | return tsc_unstable; | ||
423 | } | 408 | } |
424 | 409 | ||
425 | static int __init init_tsc_clocksource(void) | 410 | static int __init init_tsc_clocksource(void) |
@@ -429,8 +414,7 @@ static int __init init_tsc_clocksource(void) | |||
429 | /* check blacklist */ | 414 | /* check blacklist */ |
430 | dmi_check_system(bad_tsc_dmi_table); | 415 | dmi_check_system(bad_tsc_dmi_table); |
431 | 416 | ||
432 | if (unsynchronized_tsc()) /* mark unstable if unsynced */ | 417 | unsynchronized_tsc(); |
433 | mark_tsc_unstable(); | ||
434 | current_tsc_khz = tsc_khz; | 418 | current_tsc_khz = tsc_khz; |
435 | clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz, | 419 | clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz, |
436 | clocksource_tsc.shift); | 420 | clocksource_tsc.shift); |