author     john stultz <johnstul@us.ibm.com>      2006-06-26 03:25:12 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-06-26 12:58:21 -0400
commit     5d0cf410e94b1f1ff852c3f210d22cc6c5a27ffa (patch)
tree       a30cd6d201295945f401fd1f2731493f68db9ee9 /arch/i386
parent     61743fe445213b87fb55a389c8d073785323ca3e (diff)
[PATCH] Time: i386 Clocksource Drivers
Implement the time sources for i386 (acpi_pm, cyclone, hpet, pit, and tsc).
With this patch, the conversion of the i386 arch to the generic timekeeping
code should be complete.
The patch should be fairly straightforward, only adding the new clocksources.
[hirofumi@mail.parknet.co.jp: acpi_pm cleanup]
Signed-off-by: John Stultz <johnstul@us.ibm.com>
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Signed-off-by: John Stultz <johnstul@us.ibm.com>
Signed-off-by: OGAWA Hirofumi <hirofumi@mail.parknet.co.jp>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
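The conversion above hinges on one piece of generic-code arithmetic: each clocksource supplies a mult/shift pair, and the timekeeping core scales raw counter deltas to nanoseconds with it. The sketch below illustrates only that scaling and is not code from this patch; the function name and userspace types are invented for the example.

    #include <stdint.h>

    /*
     * Illustration only: the generic timekeeping code converts a raw
     * cycle delta to nanoseconds as roughly (cycles * mult) >> shift.
     * Each driver in this patch therefore only has to provide a read
     * routine plus a mask, a shift, and a matching mult.
     */
    static inline uint64_t cycles_to_ns(uint64_t cycle_delta,
                                        uint32_t mult, uint32_t shift)
    {
            return (cycle_delta * mult) >> shift;
    }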
Diffstat (limited to 'arch/i386')
-rw-r--r--   arch/i386/kernel/Makefile |   1
-rw-r--r--   arch/i386/kernel/hpet.c   |  67
-rw-r--r--   arch/i386/kernel/i8253.c  |  53
-rw-r--r--   arch/i386/kernel/time.c   |   3
-rw-r--r--   arch/i386/kernel/tsc.c    | 161
5 files changed, 282 insertions(+), 3 deletions(-)
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index f238cb6274eb..0fac85df64f1 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_EFI) += efi.o efi_stub.o
 obj-$(CONFIG_DOUBLEFAULT) += doublefault.o
 obj-$(CONFIG_VM86) += vm86.o
 obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+obj-$(CONFIG_HPET_TIMER) += hpet.o
 
 EXTRA_AFLAGS := -traditional
 
diff --git a/arch/i386/kernel/hpet.c b/arch/i386/kernel/hpet.c
new file mode 100644
index 000000000000..91a5bdd9f604
--- /dev/null
+++ b/arch/i386/kernel/hpet.c
@@ -0,0 +1,67 @@
+#include <linux/clocksource.h>
+#include <linux/errno.h>
+#include <linux/hpet.h>
+#include <linux/init.h>
+
+#include <asm/hpet.h>
+#include <asm/io.h>
+
+#define HPET_MASK	0xFFFFFFFF
+#define HPET_SHIFT	22
+
+/* FSEC = 10^-15 NSEC = 10^-9 */
+#define FSEC_PER_NSEC	1000000
+
+static void *hpet_ptr;
+
+static cycle_t read_hpet(void)
+{
+	return (cycle_t)readl(hpet_ptr);
+}
+
+static struct clocksource clocksource_hpet = {
+	.name		= "hpet",
+	.rating		= 250,
+	.read		= read_hpet,
+	.mask		= (cycle_t)HPET_MASK,
+	.mult		= 0, /* set below */
+	.shift		= HPET_SHIFT,
+	.is_continuous	= 1,
+};
+
+static int __init init_hpet_clocksource(void)
+{
+	unsigned long hpet_period;
+	void __iomem *hpet_base;
+	u64 tmp;
+
+	if (!hpet_address)
+		return -ENODEV;
+
+	/* calculate the hpet address: */
+	hpet_base =
+		(void __iomem *)ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
+	hpet_ptr = hpet_base + HPET_COUNTER;
+
+	/* calculate the frequency: */
+	hpet_period = readl(hpet_base + HPET_PERIOD);
+
+	/*
+	 * hpet period is in femto seconds per cycle
+	 * so we need to convert this to ns/cyc units
+	 * approximated by mult/2^shift
+	 *
+	 *  fsec/cyc * 1nsec/1000000fsec = nsec/cyc = mult/2^shift
+	 *  fsec/cyc * 1ns/1000000fsec * 2^shift = mult
+	 *  fsec/cyc * 2^shift * 1nsec/1000000fsec = mult
+	 *  (fsec/cyc << shift)/1000000 = mult
+	 *  (hpet_period << shift)/FSEC_PER_NSEC = mult
+	 */
+	tmp = (u64)hpet_period << HPET_SHIFT;
+	do_div(tmp, FSEC_PER_NSEC);
+	clocksource_hpet.mult = (u32)tmp;
+
+	return register_clocksource(&clocksource_hpet);
+}
+
+module_init(init_hpet_clocksource);
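As a sanity check of the fsec-to-mult derivation in the comment above, here is a small standalone example. The 14.31818 MHz rate (about 69841279 fs per cycle) is only an assumed, typical HPET frequency used for illustration; the driver reads the real period from the HPET_PERIOD register, and the macro names below are not part of the patch.

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed, typical HPET figures, for illustration only. */
    #define EXAMPLE_HPET_PERIOD_FS 69841279ULL   /* ~14.31818 MHz */
    #define EXAMPLE_HPET_SHIFT     22
    #define EXAMPLE_FSEC_PER_NSEC  1000000ULL

    int main(void)
    {
            /* same arithmetic as init_hpet_clocksource() above */
            uint64_t mult = (EXAMPLE_HPET_PERIOD_FS << EXAMPLE_HPET_SHIFT)
                            / EXAMPLE_FSEC_PER_NSEC;         /* ~292935555 */
            uint64_t ns_per_cycle = mult >> EXAMPLE_HPET_SHIFT; /* ~69 ns */

            printf("mult=%llu, one cycle ~%llu ns\n",
                   (unsigned long long)mult,
                   (unsigned long long)ns_per_cycle);
            return 0;
    }

One HPET cycle coming out at roughly 69.8 ns matches 1 / 14.31818 MHz, which is the consistency the comment's derivation is after.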
diff --git a/arch/i386/kernel/i8253.c b/arch/i386/kernel/i8253.c
index 29cb2eb34363..a276bceade68 100644
--- a/arch/i386/kernel/i8253.c
+++ b/arch/i386/kernel/i8253.c
@@ -2,6 +2,7 @@
  * i8253.c  8253/PIT functions
  *
  */
+#include <linux/clocksource.h>
 #include <linux/spinlock.h>
 #include <linux/jiffies.h>
 #include <linux/sysdev.h>
@@ -30,3 +31,55 @@ void setup_pit_timer(void)
 	outb(LATCH >> 8 , PIT_CH0);	/* MSB */
 	spin_unlock_irqrestore(&i8253_lock, flags);
 }
+
+/*
+ * Since the PIT overflows every tick, it's not very useful
+ * to just read by itself. So use jiffies to emulate a free
+ * running counter:
+ */
+static cycle_t pit_read(void)
+{
+	unsigned long flags;
+	int count;
+	u64 jifs;
+
+	spin_lock_irqsave(&i8253_lock, flags);
+	outb_p(0x00, PIT_MODE);		/* latch the count ASAP */
+	count = inb_p(PIT_CH0);		/* read the latched count */
+	count |= inb_p(PIT_CH0) << 8;
+
+	/* VIA686a test code... reset the latch if count > max + 1 */
+	if (count > LATCH) {
+		outb_p(0x34, PIT_MODE);
+		outb_p(LATCH & 0xff, PIT_CH0);
+		outb(LATCH >> 8, PIT_CH0);
+		count = LATCH - 1;
+	}
+	spin_unlock_irqrestore(&i8253_lock, flags);
+
+	jifs = jiffies_64;
+
+	jifs -= INITIAL_JIFFIES;
+	count = (LATCH-1) - count;
+
+	return (cycle_t)(jifs * LATCH) + count;
+}
+
+static struct clocksource clocksource_pit = {
+	.name	= "pit",
+	.rating	= 110,
+	.read	= pit_read,
+	.mask	= (cycle_t)-1,
+	.mult	= 0,
+	.shift	= 20,
+};
+
+static int __init init_pit_clocksource(void)
+{
+	if (num_possible_cpus() > 4) /* PIT does not scale! */
+		return 0;
+
+	clocksource_pit.mult = clocksource_hz2mult(CLOCK_TICK_RATE, 20);
+	return register_clocksource(&clocksource_pit);
+}
+module_init(init_pit_clocksource);
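For the PIT, clocksource_hz2mult(CLOCK_TICK_RATE, 20) amounts to roughly (NSEC_PER_SEC << shift) / hz. A standalone check with the PIT's 1193182 Hz input clock follows; the macro and variable names are invented for the example, and the kernel helper also rounds, so treat the numbers as approximate.

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed PIT input clock (CLOCK_TICK_RATE on i386 PCs): 1193182 Hz. */
    #define EXAMPLE_PIT_HZ     1193182ULL
    #define EXAMPLE_PIT_SHIFT  20
    #define NSEC_PER_SEC_U64   1000000000ULL

    int main(void)
    {
            /* roughly what clocksource_hz2mult() computes */
            uint64_t mult = (NSEC_PER_SEC_U64 << EXAMPLE_PIT_SHIFT)
                            / EXAMPLE_PIT_HZ;               /* ~878806418 */
            uint64_t ns_per_cycle = mult >> EXAMPLE_PIT_SHIFT;  /* ~838 ns */

            printf("mult=%llu, one PIT cycle ~%llu ns\n",
                   (unsigned long long)mult,
                   (unsigned long long)ns_per_cycle);
            return 0;
    }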
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index 2a6ab86ffc15..5f43d0410122 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -82,9 +82,6 @@ extern unsigned long wall_jiffies;
 DEFINE_SPINLOCK(rtc_lock);
 EXPORT_SYMBOL(rtc_lock);
 
-/* XXX - necessary to keep things compiling. to be removed later */
-u32 pmtmr_ioport;
-
 /*
  * This is a special lock that is owned by the CPU and holds the index
  * register we are working with. It is required for NMI access to the
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
index 96b307495e5f..7713f86389af 100644
--- a/arch/i386/kernel/tsc.c
+++ b/arch/i386/kernel/tsc.c
@@ -4,11 +4,14 @@
  * See comments there for proper credits.
  */
 
+#include <linux/clocksource.h>
 #include <linux/workqueue.h>
 #include <linux/cpufreq.h>
 #include <linux/jiffies.h>
 #include <linux/init.h>
+#include <linux/dmi.h>
 
+#include <asm/delay.h>
 #include <asm/tsc.h>
 #include <asm/delay.h>
 #include <asm/io.h>
@@ -315,3 +318,161 @@ static int __init cpufreq_tsc(void)
 core_initcall(cpufreq_tsc);
 
 #endif
+
+/* clock source code */
+
+static unsigned long current_tsc_khz = 0;
+static int tsc_update_callback(void);
+
+static cycle_t read_tsc(void)
+{
+	cycle_t ret;
+
+	rdtscll(ret);
+
+	return ret;
+}
+
+static struct clocksource clocksource_tsc = {
+	.name			= "tsc",
+	.rating			= 300,
+	.read			= read_tsc,
+	.mask			= (cycle_t)-1,
+	.mult			= 0, /* to be set */
+	.shift			= 22,
+	.update_callback	= tsc_update_callback,
+	.is_continuous		= 1,
+};
+
+static int tsc_update_callback(void)
+{
+	int change = 0;
+
+	/* check to see if we should switch to the safe clocksource: */
+	if (clocksource_tsc.rating != 50 && check_tsc_unstable()) {
+		clocksource_tsc.rating = 50;
+		reselect_clocksource();
+		change = 1;
+	}
+
+	/* only update if tsc_khz has changed: */
+	if (current_tsc_khz != tsc_khz) {
+		current_tsc_khz = tsc_khz;
+		clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
+						clocksource_tsc.shift);
+		change = 1;
+	}
+
+	return change;
+}
+
+static int __init dmi_mark_tsc_unstable(struct dmi_system_id *d)
+{
+	printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
+		d->ident);
+	mark_tsc_unstable();
+	return 0;
+}
+
+/* List of systems that have known TSC problems */
+static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
+	{
+		.callback = dmi_mark_tsc_unstable,
+		.ident = "IBM Thinkpad 380XD",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
+			DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
+		},
+	},
+	{}
+};
+
+#define TSC_FREQ_CHECK_INTERVAL (10*MSEC_PER_SEC) /* 10sec in MS */
+static struct timer_list verify_tsc_freq_timer;
+
+/* XXX - Probably should add locking */
+static void verify_tsc_freq(unsigned long unused)
+{
+	static u64 last_tsc;
+	static unsigned long last_jiffies;
+
+	u64 now_tsc, interval_tsc;
+	unsigned long now_jiffies, interval_jiffies;
+
+
+	if (check_tsc_unstable())
+		return;
+
+	rdtscll(now_tsc);
+	now_jiffies = jiffies;
+
+	if (!last_jiffies) {
+		goto out;
+	}
+
+	interval_jiffies = now_jiffies - last_jiffies;
+	interval_tsc = now_tsc - last_tsc;
+	interval_tsc *= HZ;
+	do_div(interval_tsc, cpu_khz*1000);
+
+	if (interval_tsc < (interval_jiffies * 3 / 4)) {
+		printk("TSC appears to be running slowly. "
+			"Marking it as unstable\n");
+		mark_tsc_unstable();
+		return;
+	}
+
+out:
+	last_tsc = now_tsc;
+	last_jiffies = now_jiffies;
+	/* set us up to go off on the next interval: */
+	mod_timer(&verify_tsc_freq_timer,
+		jiffies + msecs_to_jiffies(TSC_FREQ_CHECK_INTERVAL));
+}
+
+/*
+ * Make an educated guess if the TSC is trustworthy and synchronized
+ * over all CPUs.
+ */
+static __init int unsynchronized_tsc(void)
+{
+	/*
+	 * Intel systems are normally all synchronized.
+	 * Exceptions must mark TSC as unstable:
+	 */
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+		return 0;
+
+	/* assume multi socket systems are not synchronized: */
+	return num_possible_cpus() > 1;
+}
+
+static int __init init_tsc_clocksource(void)
+{
+
+	if (cpu_has_tsc && tsc_khz && !tsc_disable) {
+		/* check blacklist */
+		dmi_check_system(bad_tsc_dmi_table);
+
+		if (unsynchronized_tsc()) /* mark unstable if unsynced */
+			mark_tsc_unstable();
+		current_tsc_khz = tsc_khz;
+		clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
+						clocksource_tsc.shift);
+		/* lower the rating if we already know it's unstable: */
+		if (check_tsc_unstable())
+			clocksource_tsc.rating = 50;
+
+		init_timer(&verify_tsc_freq_timer);
+		verify_tsc_freq_timer.function = verify_tsc_freq;
+		verify_tsc_freq_timer.expires =
+			jiffies + msecs_to_jiffies(TSC_FREQ_CHECK_INTERVAL);
+		add_timer(&verify_tsc_freq_timer);
+
+		return register_clocksource(&clocksource_tsc);
+	}
+
+	return 0;
+}
+
+module_init(init_tsc_clocksource);