diff options
author | John Stultz <johnstul@us.ibm.com> | 2007-02-16 04:28:20 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-02-16 11:14:00 -0500 |
commit | 1489939f0ab64b96998e04068c516c39afe29654 (patch) | |
tree | 6bb3ca772edf1dd8877482dc3b6bcc6f0d699e72 /arch/x86_64/kernel/time.c | |
parent | c37e7bb5d2ce36ef377caabfced0b132bb1bf6a7 (diff) |
[PATCH] time: x86_64: convert x86_64 to use GENERIC_TIME
This patch converts x86_64 to use the GENERIC_TIME infrastructure and adds
clocksource structures for both TSC and HPET (ACPI PM is shared w/ i386).
[akpm@osdl.org: fix printk timestamps]
[akpm@osdl.org: fix printk cleanups]
[akpm@osdl.org: hpet build fix]
Signed-off-by: John Stultz <johnstul@us.ibm.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Andi Kleen <ak@muc.de>
Cc: Roman Zippel <zippel@linux-m68k.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/x86_64/kernel/time.c')
-rw-r--r-- | arch/x86_64/kernel/time.c | 300 |
1 file changed, 6 insertions, 294 deletions
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c index 216fc9d74247..d84208e3c618 100644 --- a/arch/x86_64/kernel/time.c +++ b/arch/x86_64/kernel/time.c | |||
@@ -44,9 +44,6 @@ | |||
44 | #include <asm/apic.h> | 44 | #include <asm/apic.h> |
45 | #include <asm/hpet.h> | 45 | #include <asm/hpet.h> |
46 | 46 | ||
47 | #ifdef CONFIG_CPU_FREQ | ||
48 | extern void cpufreq_delayed_get(void); | ||
49 | #endif | ||
50 | extern void i8254_timer_resume(void); | 47 | extern void i8254_timer_resume(void); |
51 | extern int using_apic_timer; | 48 | extern int using_apic_timer; |
52 | 49 | ||
@@ -57,8 +54,6 @@ EXPORT_SYMBOL(rtc_lock); | |||
57 | DEFINE_SPINLOCK(i8253_lock); | 54 | DEFINE_SPINLOCK(i8253_lock); |
58 | 55 | ||
59 | unsigned long vxtime_hz = PIT_TICK_RATE; | 56 | unsigned long vxtime_hz = PIT_TICK_RATE; |
60 | int report_lost_ticks; /* command line option */ | ||
61 | unsigned long long monotonic_base; | ||
62 | 57 | ||
63 | struct vxtime_data __vxtime __section_vxtime; /* for vsyscalls */ | 58 | struct vxtime_data __vxtime __section_vxtime; /* for vsyscalls */ |
64 | 59 | ||
@@ -66,76 +61,6 @@ volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES; | |||
66 | struct timespec __xtime __section_xtime; | 61 | struct timespec __xtime __section_xtime; |
67 | struct timezone __sys_tz __section_sys_tz; | 62 | struct timezone __sys_tz __section_sys_tz; |
68 | 63 | ||
69 | unsigned int (*do_gettimeoffset)(void) = do_gettimeoffset_tsc; | ||
70 | |||
71 | /* | ||
72 | * This version of gettimeofday() has microsecond resolution and better than | ||
73 | * microsecond precision, as we're using at least a 10 MHz (usually 14.31818 | ||
74 | * MHz) HPET timer. | ||
75 | */ | ||
76 | |||
77 | void do_gettimeofday(struct timeval *tv) | ||
78 | { | ||
79 | unsigned long seq; | ||
80 | unsigned int sec, usec; | ||
81 | |||
82 | do { | ||
83 | seq = read_seqbegin(&xtime_lock); | ||
84 | |||
85 | sec = xtime.tv_sec; | ||
86 | usec = xtime.tv_nsec / NSEC_PER_USEC; | ||
87 | |||
88 | /* i386 does some correction here to keep the clock | ||
89 | monotonous even when ntpd is fixing drift. | ||
90 | But they didn't work for me, there is a non monotonic | ||
91 | clock anyways with ntp. | ||
92 | I dropped all corrections now until a real solution can | ||
93 | be found. Note when you fix it here you need to do the same | ||
94 | in arch/x86_64/kernel/vsyscall.c and export all needed | ||
95 | variables in vmlinux.lds. -AK */ | ||
96 | usec += do_gettimeoffset(); | ||
97 | |||
98 | } while (read_seqretry(&xtime_lock, seq)); | ||
99 | |||
100 | tv->tv_sec = sec + usec / USEC_PER_SEC; | ||
101 | tv->tv_usec = usec % USEC_PER_SEC; | ||
102 | } | ||
103 | |||
104 | EXPORT_SYMBOL(do_gettimeofday); | ||
105 | |||
106 | /* | ||
107 | * settimeofday() first undoes the correction that gettimeofday would do | ||
108 | * on the time, and then saves it. This is ugly, but has been like this for | ||
109 | * ages already. | ||
110 | */ | ||
111 | |||
112 | int do_settimeofday(struct timespec *tv) | ||
113 | { | ||
114 | time_t wtm_sec, sec = tv->tv_sec; | ||
115 | long wtm_nsec, nsec = tv->tv_nsec; | ||
116 | |||
117 | if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) | ||
118 | return -EINVAL; | ||
119 | |||
120 | write_seqlock_irq(&xtime_lock); | ||
121 | |||
122 | nsec -= do_gettimeoffset() * NSEC_PER_USEC; | ||
123 | |||
124 | wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); | ||
125 | wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec); | ||
126 | |||
127 | set_normalized_timespec(&xtime, sec, nsec); | ||
128 | set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); | ||
129 | |||
130 | ntp_clear(); | ||
131 | |||
132 | write_sequnlock_irq(&xtime_lock); | ||
133 | clock_was_set(); | ||
134 | return 0; | ||
135 | } | ||
136 | |||
137 | EXPORT_SYMBOL(do_settimeofday); | ||
138 | |||
139 | unsigned long profile_pc(struct pt_regs *regs) | 64 | unsigned long profile_pc(struct pt_regs *regs) |
140 | { | 65 | { |
141 | unsigned long pc = instruction_pointer(regs); | 66 | unsigned long pc = instruction_pointer(regs); |
@@ -225,85 +150,9 @@ static void set_rtc_mmss(unsigned long nowtime) | |||
225 | } | 150 | } |
226 | 151 | ||
227 | 152 | ||
228 | /* monotonic_clock(): returns # of nanoseconds passed since time_init() | ||
229 | * Note: This function is required to return accurate | ||
230 | * time even in the absence of multiple timer ticks. | ||
231 | */ | ||
232 | extern unsigned long long cycles_2_ns(unsigned long long cyc); | ||
233 | unsigned long long monotonic_clock(void) | ||
234 | { | ||
235 | unsigned long seq; | ||
236 | u32 last_offset, this_offset, offset; | ||
237 | unsigned long long base; | ||
238 | |||
239 | if (vxtime.mode == VXTIME_HPET) { | ||
240 | do { | ||
241 | seq = read_seqbegin(&xtime_lock); | ||
242 | |||
243 | last_offset = vxtime.last; | ||
244 | base = monotonic_base; | ||
245 | this_offset = hpet_readl(HPET_COUNTER); | ||
246 | } while (read_seqretry(&xtime_lock, seq)); | ||
247 | offset = (this_offset - last_offset); | ||
248 | offset *= NSEC_PER_TICK / hpet_tick; | ||
249 | } else { | ||
250 | do { | ||
251 | seq = read_seqbegin(&xtime_lock); | ||
252 | |||
253 | last_offset = vxtime.last_tsc; | ||
254 | base = monotonic_base; | ||
255 | } while (read_seqretry(&xtime_lock, seq)); | ||
256 | this_offset = get_cycles_sync(); | ||
257 | offset = cycles_2_ns(this_offset - last_offset); | ||
258 | } | ||
259 | return base + offset; | ||
260 | } | ||
261 | EXPORT_SYMBOL(monotonic_clock); | ||
262 | |||
263 | static noinline void handle_lost_ticks(int lost) | ||
264 | { | ||
265 | static long lost_count; | ||
266 | static int warned; | ||
267 | if (report_lost_ticks) { | ||
268 | printk(KERN_WARNING "time.c: Lost %d timer tick(s)! ", lost); | ||
269 | print_symbol("rip %s)\n", get_irq_regs()->rip); | ||
270 | } | ||
271 | |||
272 | if (lost_count == 1000 && !warned) { | ||
273 | printk(KERN_WARNING "warning: many lost ticks.\n" | ||
274 | KERN_WARNING "Your time source seems to be instable or " | ||
275 | "some driver is hogging interupts\n"); | ||
276 | print_symbol("rip %s\n", get_irq_regs()->rip); | ||
277 | if (vxtime.mode == VXTIME_TSC && hpet_address) { | ||
278 | printk(KERN_WARNING "Falling back to HPET\n"); | ||
279 | if (hpet_use_timer) | ||
280 | vxtime.last = hpet_readl(HPET_T0_CMP) - | ||
281 | hpet_tick; | ||
282 | else | ||
283 | vxtime.last = hpet_readl(HPET_COUNTER); | ||
284 | vxtime.mode = VXTIME_HPET; | ||
285 | vxtime.hpet_address = hpet_address; | ||
286 | do_gettimeoffset = do_gettimeoffset_hpet; | ||
287 | } | ||
288 | /* else should fall back to PIT, but code missing. */ | ||
289 | warned = 1; | ||
290 | } else | ||
291 | lost_count++; | ||
292 | |||
293 | #ifdef CONFIG_CPU_FREQ | ||
294 | /* In some cases the CPU can change frequency without us noticing | ||
295 | Give cpufreq a change to catch up. */ | ||
296 | if ((lost_count+1) % 25 == 0) | ||
297 | cpufreq_delayed_get(); | ||
298 | #endif | ||
299 | } | ||
300 | |||
301 | void main_timer_handler(void) | 153 | void main_timer_handler(void) |
302 | { | 154 | { |
303 | static unsigned long rtc_update = 0; | 155 | static unsigned long rtc_update = 0; |
304 | unsigned long tsc; | ||
305 | int delay = 0, offset = 0, lost = 0; | ||
306 | |||
307 | /* | 156 | /* |
308 | * Here we are in the timer irq handler. We have irqs locally disabled (so we | 157 | * Here we are in the timer irq handler. We have irqs locally disabled (so we |
309 | * don't need spin_lock_irqsave()) but we don't know if the timer_bh is running | 158 | * don't need spin_lock_irqsave()) but we don't know if the timer_bh is running |
@@ -313,72 +162,11 @@ void main_timer_handler(void) | |||
313 | 162 | ||
314 | write_seqlock(&xtime_lock); | 163 | write_seqlock(&xtime_lock); |
315 | 164 | ||
316 | if (hpet_address) | ||
317 | offset = hpet_readl(HPET_COUNTER); | ||
318 | |||
319 | if (hpet_use_timer) { | ||
320 | /* if we're using the hpet timer functionality, | ||
321 | * we can more accurately know the counter value | ||
322 | * when the timer interrupt occured. | ||
323 | */ | ||
324 | offset = hpet_readl(HPET_T0_CMP) - hpet_tick; | ||
325 | delay = hpet_readl(HPET_COUNTER) - offset; | ||
326 | } else if (!pmtmr_ioport) { | ||
327 | spin_lock(&i8253_lock); | ||
328 | outb_p(0x00, 0x43); | ||
329 | delay = inb_p(0x40); | ||
330 | delay |= inb(0x40) << 8; | ||
331 | spin_unlock(&i8253_lock); | ||
332 | delay = LATCH - 1 - delay; | ||
333 | } | ||
334 | |||
335 | tsc = get_cycles_sync(); | ||
336 | |||
337 | if (vxtime.mode == VXTIME_HPET) { | ||
338 | if (offset - vxtime.last > hpet_tick) { | ||
339 | lost = (offset - vxtime.last) / hpet_tick - 1; | ||
340 | } | ||
341 | |||
342 | monotonic_base += | ||
343 | (offset - vxtime.last) * NSEC_PER_TICK / hpet_tick; | ||
344 | |||
345 | vxtime.last = offset; | ||
346 | #ifdef CONFIG_X86_PM_TIMER | ||
347 | } else if (vxtime.mode == VXTIME_PMTMR) { | ||
348 | lost = pmtimer_mark_offset(); | ||
349 | #endif | ||
350 | } else { | ||
351 | offset = (((tsc - vxtime.last_tsc) * | ||
352 | vxtime.tsc_quot) >> US_SCALE) - USEC_PER_TICK; | ||
353 | |||
354 | if (offset < 0) | ||
355 | offset = 0; | ||
356 | |||
357 | if (offset > USEC_PER_TICK) { | ||
358 | lost = offset / USEC_PER_TICK; | ||
359 | offset %= USEC_PER_TICK; | ||
360 | } | ||
361 | |||
362 | monotonic_base += cycles_2_ns(tsc - vxtime.last_tsc); | ||
363 | |||
364 | vxtime.last_tsc = tsc - vxtime.quot * delay / vxtime.tsc_quot; | ||
365 | |||
366 | if ((((tsc - vxtime.last_tsc) * | ||
367 | vxtime.tsc_quot) >> US_SCALE) < offset) | ||
368 | vxtime.last_tsc = tsc - | ||
369 | (((long) offset << US_SCALE) / vxtime.tsc_quot) - 1; | ||
370 | } | ||
371 | |||
372 | if (lost > 0) | ||
373 | handle_lost_ticks(lost); | ||
374 | else | ||
375 | lost = 0; | ||
376 | |||
377 | /* | 165 | /* |
378 | * Do the timer stuff. | 166 | * Do the timer stuff. |
379 | */ | 167 | */ |
380 | 168 | ||
381 | do_timer(lost + 1); | 169 | do_timer(1); |
382 | #ifndef CONFIG_SMP | 170 | #ifndef CONFIG_SMP |
383 | update_process_times(user_mode(get_irq_regs())); | 171 | update_process_times(user_mode(get_irq_regs())); |
384 | #endif | 172 | #endif |
@@ -537,12 +325,6 @@ void __init stop_timer_interrupt(void) | |||
537 | printk(KERN_INFO "timer: %s interrupt stopped.\n", name); | 325 | printk(KERN_INFO "timer: %s interrupt stopped.\n", name); |
538 | } | 326 | } |
539 | 327 | ||
540 | int __init time_setup(char *str) | ||
541 | { | ||
542 | report_lost_ticks = 1; | ||
543 | return 1; | ||
544 | } | ||
545 | |||
546 | static struct irqaction irq0 = { | 328 | static struct irqaction irq0 = { |
547 | timer_interrupt, IRQF_DISABLED, CPU_MASK_NONE, "timer", NULL, NULL | 329 | timer_interrupt, IRQF_DISABLED, CPU_MASK_NONE, "timer", NULL, NULL |
548 | }; | 330 | }; |
@@ -557,9 +339,7 @@ void __init time_init(void) | |||
557 | set_normalized_timespec(&wall_to_monotonic, | 339 | set_normalized_timespec(&wall_to_monotonic, |
558 | -xtime.tv_sec, -xtime.tv_nsec); | 340 | -xtime.tv_sec, -xtime.tv_nsec); |
559 | 341 | ||
560 | if (!hpet_arch_init()) | 342 | if (hpet_arch_init()) |
561 | vxtime_hz = (FSEC_PER_SEC + hpet_period / 2) / hpet_period; | ||
562 | else | ||
563 | hpet_address = 0; | 343 | hpet_address = 0; |
564 | 344 | ||
565 | if (hpet_use_timer) { | 345 | if (hpet_use_timer) { |
@@ -567,82 +347,26 @@ void __init time_init(void) | |||
567 | tick_nsec = TICK_NSEC_HPET; | 347 | tick_nsec = TICK_NSEC_HPET; |
568 | cpu_khz = hpet_calibrate_tsc(); | 348 | cpu_khz = hpet_calibrate_tsc(); |
569 | timename = "HPET"; | 349 | timename = "HPET"; |
570 | #ifdef CONFIG_X86_PM_TIMER | ||
571 | } else if (pmtmr_ioport && !hpet_address) { | ||
572 | vxtime_hz = PM_TIMER_FREQUENCY; | ||
573 | timename = "PM"; | ||
574 | pit_init(); | ||
575 | cpu_khz = pit_calibrate_tsc(); | ||
576 | #endif | ||
577 | } else { | 350 | } else { |
578 | pit_init(); | 351 | pit_init(); |
579 | cpu_khz = pit_calibrate_tsc(); | 352 | cpu_khz = pit_calibrate_tsc(); |
580 | timename = "PIT"; | 353 | timename = "PIT"; |
581 | } | 354 | } |
582 | 355 | ||
583 | vxtime.mode = VXTIME_TSC; | ||
584 | vxtime.quot = (USEC_PER_SEC << US_SCALE) / vxtime_hz; | ||
585 | vxtime.tsc_quot = (USEC_PER_MSEC << US_SCALE) / cpu_khz; | ||
586 | vxtime.last_tsc = get_cycles_sync(); | ||
587 | set_cyc2ns_scale(cpu_khz); | ||
588 | setup_irq(0, &irq0); | ||
589 | |||
590 | #ifndef CONFIG_SMP | ||
591 | time_init_gtod(); | ||
592 | #endif | ||
593 | } | ||
594 | |||
595 | /* | ||
596 | * Decide what mode gettimeofday should use. | ||
597 | */ | ||
598 | void time_init_gtod(void) | ||
599 | { | ||
600 | char *timetype; | ||
601 | |||
602 | if (unsynchronized_tsc()) | 356 | if (unsynchronized_tsc()) |
603 | notsc = 1; | 357 | mark_tsc_unstable(); |
604 | 358 | ||
605 | if (cpu_has(&boot_cpu_data, X86_FEATURE_RDTSCP)) | 359 | if (cpu_has(&boot_cpu_data, X86_FEATURE_RDTSCP)) |
606 | vgetcpu_mode = VGETCPU_RDTSCP; | 360 | vgetcpu_mode = VGETCPU_RDTSCP; |
607 | else | 361 | else |
608 | vgetcpu_mode = VGETCPU_LSL; | 362 | vgetcpu_mode = VGETCPU_LSL; |
609 | 363 | ||
610 | if (hpet_address && notsc) { | 364 | set_cyc2ns_scale(cpu_khz); |
611 | timetype = hpet_use_timer ? "HPET" : "PIT/HPET"; | ||
612 | if (hpet_use_timer) | ||
613 | vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick; | ||
614 | else | ||
615 | vxtime.last = hpet_readl(HPET_COUNTER); | ||
616 | vxtime.mode = VXTIME_HPET; | ||
617 | vxtime.hpet_address = hpet_address; | ||
618 | do_gettimeoffset = do_gettimeoffset_hpet; | ||
619 | #ifdef CONFIG_X86_PM_TIMER | ||
620 | /* Using PM for gettimeofday is quite slow, but we have no other | ||
621 | choice because the TSC is too unreliable on some systems. */ | ||
622 | } else if (pmtmr_ioport && !hpet_address && notsc) { | ||
623 | timetype = "PM"; | ||
624 | do_gettimeoffset = do_gettimeoffset_pm; | ||
625 | vxtime.mode = VXTIME_PMTMR; | ||
626 | sysctl_vsyscall = 0; | ||
627 | printk(KERN_INFO "Disabling vsyscall due to use of PM timer\n"); | ||
628 | #endif | ||
629 | } else { | ||
630 | timetype = hpet_use_timer ? "HPET/TSC" : "PIT/TSC"; | ||
631 | vxtime.mode = VXTIME_TSC; | ||
632 | } | ||
633 | |||
634 | printk(KERN_INFO "time.c: Using %ld.%06ld MHz WALL %s GTOD %s timer.\n", | ||
635 | vxtime_hz / 1000000, vxtime_hz % 1000000, timename, timetype); | ||
636 | printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n", | 365 | printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n", |
637 | cpu_khz / 1000, cpu_khz % 1000); | 366 | cpu_khz / 1000, cpu_khz % 1000); |
638 | vxtime.quot = (USEC_PER_SEC << US_SCALE) / vxtime_hz; | 367 | setup_irq(0, &irq0); |
639 | vxtime.tsc_quot = (USEC_PER_MSEC << US_SCALE) / cpu_khz; | ||
640 | vxtime.last_tsc = get_cycles_sync(); | ||
641 | |||
642 | set_cyc2ns_scale(cpu_khz); | ||
643 | } | 368 | } |
644 | 369 | ||
645 | __setup("report_lost_ticks", time_setup); | ||
646 | 370 | ||
647 | static long clock_cmos_diff; | 371 | static long clock_cmos_diff; |
648 | static unsigned long sleep_start; | 372 | static unsigned long sleep_start; |
@@ -688,20 +412,8 @@ static int timer_resume(struct sys_device *dev) | |||
688 | write_seqlock_irqsave(&xtime_lock,flags); | 412 | write_seqlock_irqsave(&xtime_lock,flags); |
689 | xtime.tv_sec = sec; | 413 | xtime.tv_sec = sec; |
690 | xtime.tv_nsec = 0; | 414 | xtime.tv_nsec = 0; |
691 | if (vxtime.mode == VXTIME_HPET) { | ||
692 | if (hpet_use_timer) | ||
693 | vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick; | ||
694 | else | ||
695 | vxtime.last = hpet_readl(HPET_COUNTER); | ||
696 | #ifdef CONFIG_X86_PM_TIMER | ||
697 | } else if (vxtime.mode == VXTIME_PMTMR) { | ||
698 | pmtimer_resume(); | ||
699 | #endif | ||
700 | } else | ||
701 | vxtime.last_tsc = get_cycles_sync(); | ||
702 | write_sequnlock_irqrestore(&xtime_lock,flags); | ||
703 | jiffies += sleep_length; | 415 | jiffies += sleep_length; |
704 | monotonic_base += sleep_length * (NSEC_PER_SEC/HZ); | 416 | write_sequnlock_irqrestore(&xtime_lock,flags); |
705 | touch_softlockup_watchdog(); | 417 | touch_softlockup_watchdog(); |
706 | return 0; | 418 | return 0; |
707 | } | 419 | } |