author     Vojtech Pavlik <vojtech@suse.cz>          2006-06-26 07:58:32 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>     2006-06-26 13:48:19 -0400
commit     4221133845f81ab4428c79a89e37be2c87624c1a
tree       884bfc1913bff964be43321cedb4653bfa976cf3 /arch/x86_64/kernel/time.c
parent     e30db3e69948dafb616d121e52c8d62d38dab68c
[PATCH] x86_64: Make use of the *PER* constants in time.c
This patch applies the newly added conversion constants from time.h
throughout x86-64 time.c. The code becomes significantly easier to
understand. (The constants are sketched below, after the sign-offs.)
Signed-off-by: Vojtech Pavlik <vojtech@suse.cz>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
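
For reference, the conversion constants the commit message refers to look roughly like the following. This is a sketch based on the include/linux/time.h definitions of that era plus the per-tick helpers this patch adds to time.c; it is illustrative and not part of the commit itself:

    /* Assumed from include/linux/time.h (standard SI ratios): */
    #define MSEC_PER_SEC    1000L              /* milliseconds per second */
    #define USEC_PER_MSEC   1000L              /* microseconds per millisecond */
    #define NSEC_PER_USEC   1000L              /* nanoseconds per microsecond */
    #define NSEC_PER_MSEC   1000000L           /* nanoseconds per millisecond */
    #define USEC_PER_SEC    1000000L           /* microseconds per second */
    #define NSEC_PER_SEC    1000000000L        /* nanoseconds per second */
    #define FSEC_PER_SEC    1000000000000000L  /* femtoseconds per second */

    /* Per-tick helpers added by this patch to time.c (HZ ticks per second): */
    #define USEC_PER_TICK   (USEC_PER_SEC / HZ)
    #define NSEC_PER_TICK   (NSEC_PER_SEC / HZ)
    #define FSEC_PER_TICK   (FSEC_PER_SEC / HZ)

With these in place, an expression such as (jiffies - wall_jiffies) * (1000000L / HZ) becomes (jiffies - wall_jiffies) * USEC_PER_TICK, which is the readability gain the patch is after.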
Diffstat (limited to 'arch/x86_64/kernel/time.c')
-rw-r--r--  arch/x86_64/kernel/time.c | 63
1 file changed, 35 insertions(+), 28 deletions(-)
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index ab9bea82945f..51afb07bc14e 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -56,6 +56,13 @@ DEFINE_SPINLOCK(i8253_lock);
 int nohpet __initdata = 0;
 static int notsc __initdata = 0;
 
+#define USEC_PER_TICK (USEC_PER_SEC / HZ)
+#define NSEC_PER_TICK (NSEC_PER_SEC / HZ)
+#define FSEC_PER_TICK (FSEC_PER_SEC / HZ)
+
+#define NS_SCALE 10 /* 2^10, carefully chosen */
+#define US_SCALE 32 /* 2^32, arbitralrily chosen */
+
 unsigned int cpu_khz; /* TSC clocks / usec, not used here */
 static unsigned long hpet_period; /* fsecs / HPET clock */
 unsigned long hpet_tick; /* HPET clocks / interrupt */
@@ -88,7 +95,7 @@ static inline unsigned int do_gettimeoffset_tsc(void)
         t = get_cycles_sync();
         if (t < vxtime.last_tsc)
                 t = vxtime.last_tsc; /* hack */
-        x = ((t - vxtime.last_tsc) * vxtime.tsc_quot) >> 32;
+        x = ((t - vxtime.last_tsc) * vxtime.tsc_quot) >> US_SCALE;
         return x;
 }
 
@@ -96,7 +103,7 @@ static inline unsigned int do_gettimeoffset_hpet(void)
 {
         /* cap counter read to one tick to avoid inconsistencies */
         unsigned long counter = hpet_readl(HPET_COUNTER) - vxtime.last;
-        return (min(counter,hpet_tick) * vxtime.quot) >> 32;
+        return (min(counter,hpet_tick) * vxtime.quot) >> US_SCALE;
 }
 
 unsigned int (*do_gettimeoffset)(void) = do_gettimeoffset_tsc;
@@ -116,7 +123,7 @@ void do_gettimeofday(struct timeval *tv)
                 seq = read_seqbegin(&xtime_lock);
 
                 sec = xtime.tv_sec;
-                usec = xtime.tv_nsec / 1000;
+                usec = xtime.tv_nsec / NSEC_PER_USEC;
 
                 /* i386 does some correction here to keep the clock
                    monotonous even when ntpd is fixing drift.
@@ -127,14 +134,14 @@ void do_gettimeofday(struct timeval *tv)
                    in arch/x86_64/kernel/vsyscall.c and export all needed
                    variables in vmlinux.lds. -AK */
 
-                t = (jiffies - wall_jiffies) * (1000000L / HZ) +
+                t = (jiffies - wall_jiffies) * USEC_PER_TICK +
                         do_gettimeoffset();
                 usec += t;
 
         } while (read_seqretry(&xtime_lock, seq));
 
-        tv->tv_sec = sec + usec / 1000000;
-        tv->tv_usec = usec % 1000000;
+        tv->tv_sec = sec + usec / USEC_PER_SEC;
+        tv->tv_usec = usec % USEC_PER_SEC;
 }
 
 EXPORT_SYMBOL(do_gettimeofday);
@@ -155,8 +162,8 @@ int do_settimeofday(struct timespec *tv)
 
         write_seqlock_irq(&xtime_lock);
 
-        nsec -= do_gettimeoffset() * 1000 +
-                (jiffies - wall_jiffies) * (NSEC_PER_SEC/HZ);
+        nsec -= do_gettimeoffset() * NSEC_PER_USEC +
+                (jiffies - wall_jiffies) * NSEC_PER_TICK;
 
         wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
         wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
@@ -286,7 +293,7 @@ unsigned long long monotonic_clock(void)
                         this_offset = hpet_readl(HPET_COUNTER);
                 } while (read_seqretry(&xtime_lock, seq));
                 offset = (this_offset - last_offset);
-                offset *= (NSEC_PER_SEC/HZ) / hpet_tick;
+                offset *= NSEC_PER_TICK / hpet_tick;
         } else {
                 do {
                         seq = read_seqbegin(&xtime_lock);
@@ -295,7 +302,8 @@ unsigned long long monotonic_clock(void)
                         base = monotonic_base;
                 } while (read_seqretry(&xtime_lock, seq));
                 this_offset = get_cycles_sync();
+                /* FIXME: 1000 or 1000000? */
                 offset = (this_offset - last_offset)*1000 / cpu_khz;
         }
         return base + offset;
 }
@@ -380,7 +388,7 @@ void main_timer_handler(struct pt_regs *regs)
                 }
 
                 monotonic_base +=
-                        (offset - vxtime.last)*(NSEC_PER_SEC/HZ) / hpet_tick;
+                        (offset - vxtime.last) * NSEC_PER_TICK / hpet_tick;
 
                 vxtime.last = offset;
 #ifdef CONFIG_X86_PM_TIMER
@@ -389,24 +397,25 @@ void main_timer_handler(struct pt_regs *regs)
 #endif
         } else {
                 offset = (((tsc - vxtime.last_tsc) *
-                           vxtime.tsc_quot) >> 32) - (USEC_PER_SEC / HZ);
+                           vxtime.tsc_quot) >> US_SCALE) - USEC_PER_TICK;
 
                 if (offset < 0)
                         offset = 0;
 
-                if (offset > (USEC_PER_SEC / HZ)) {
-                        lost = offset / (USEC_PER_SEC / HZ);
-                        offset %= (USEC_PER_SEC / HZ);
+                if (offset > USEC_PER_TICK) {
+                        lost = offset / USEC_PER_TICK;
+                        offset %= USEC_PER_TICK;
                 }
 
-                monotonic_base += (tsc - vxtime.last_tsc)*1000000/cpu_khz ;
+                /* FIXME: 1000 or 1000000? */
+                monotonic_base += (tsc - vxtime.last_tsc) * 1000000 / cpu_khz;
 
                 vxtime.last_tsc = tsc - vxtime.quot * delay / vxtime.tsc_quot;
 
                 if ((((tsc - vxtime.last_tsc) *
-                      vxtime.tsc_quot) >> 32) < offset)
+                      vxtime.tsc_quot) >> US_SCALE) < offset)
                         vxtime.last_tsc = tsc -
-                                (((long) offset << 32) / vxtime.tsc_quot) - 1;
+                                (((long) offset << US_SCALE) / vxtime.tsc_quot) - 1;
         }
 
         if (lost > 0) {
@@ -466,16 +475,15 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 }
 
 static unsigned int cyc2ns_scale __read_mostly;
-#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
 
 static inline void set_cyc2ns_scale(unsigned long cpu_khz)
 {
-        cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
+        cyc2ns_scale = (NSEC_PER_MSEC << NS_SCALE) / cpu_khz;
 }
 
 static inline unsigned long long cycles_2_ns(unsigned long long cyc)
 {
-        return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
+        return (cyc * cyc2ns_scale) >> NS_SCALE;
 }
 
 unsigned long long sched_clock(void)
@@ -488,7 +496,7 @@ unsigned long long sched_clock(void)
            Disadvantage is a small drift between CPUs in some configurations,
            but that should be tolerable. */
         if (__vxtime.mode == VXTIME_HPET)
-                return (hpet_readl(HPET_COUNTER) * vxtime.quot) >> 32;
+                return (hpet_readl(HPET_COUNTER) * vxtime.quot) >> US_SCALE;
 #endif
 
         /* Could do CPU core sync here. Opteron can execute rdtsc speculatively,
@@ -631,7 +639,7 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 
                 cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
                 if (!(freq->flags & CPUFREQ_CONST_LOOPS))
-                        vxtime.tsc_quot = (1000L << 32) / cpu_khz;
+                        vxtime.tsc_quot = (USEC_PER_MSEC << US_SCALE) / cpu_khz;
         }
 
         set_cyc2ns_scale(cpu_khz_ref);
@@ -823,8 +831,7 @@ static int hpet_init(void)
         if (hpet_period < 100000 || hpet_period > 100000000)
                 return -1;
 
-        hpet_tick = (1000000000L * (USEC_PER_SEC / HZ) + hpet_period / 2) /
-                hpet_period;
+        hpet_tick = (FSEC_PER_TICK + hpet_period / 2) / hpet_period;
 
         hpet_use_timer = (id & HPET_ID_LEGSUP);
 
@@ -898,7 +905,7 @@ void __init time_init(void)
                         -xtime.tv_sec, -xtime.tv_nsec);
 
         if (!hpet_init())
-                vxtime_hz = (1000000000000000L + hpet_period / 2) / hpet_period;
+                vxtime_hz = (FSEC_PER_SEC + hpet_period / 2) / hpet_period;
         else
                 vxtime.hpet_address = 0;
 
@@ -927,8 +934,8 @@ void __init time_init(void)
                vxtime_hz / 1000000, vxtime_hz % 1000000, timename, gtod);
         printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
                 cpu_khz / 1000, cpu_khz % 1000);
-        vxtime.quot = (1000000L << 32) / vxtime_hz;
-        vxtime.tsc_quot = (1000L << 32) / cpu_khz;
+        vxtime.quot = (USEC_PER_SEC << US_SCALE) / vxtime_hz;
+        vxtime.tsc_quot = (USEC_PER_MSEC << US_SCALE) / cpu_khz;
         vxtime.last_tsc = get_cycles_sync();
         setup_irq(0, &irq0);
 
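
The NS_SCALE and US_SCALE defines name the bare 10 and 32 shift counts used for fixed-point time conversion. The following standalone user-space sketch (assumptions: a hypothetical 2.2 GHz TSC, a 10 MHz HPET, and HZ = 250; this is illustrative, not kernel code) mirrors the arithmetic from time_init(), set_cyc2ns_scale(), do_gettimeoffset_tsc() and hpet_init() to show why multiplying by a pre-scaled quotient and shifting back recovers microseconds and nanoseconds:

    #include <stdio.h>

    #define US_SCALE 32 /* quotients scaled by 2^32, as for vxtime.tsc_quot */
    #define NS_SCALE 10 /* cyc2ns_scale scaled by 2^10 */

    int main(void)
    {
            unsigned long long cpu_khz = 2200000;        /* hypothetical 2.2 GHz TSC */
            unsigned long long hpet_period = 100000000;  /* fs per HPET clock (10 MHz) */
            unsigned long long fsec_per_tick = 1000000000000000ULL / 250; /* HZ = 250 */

            /* As in time_init(): usec per TSC cycle, pre-scaled by 2^US_SCALE
               (cpu_khz is cycles per msec, so 1000/cpu_khz is usec per cycle). */
            unsigned long long tsc_quot = (1000ULL << US_SCALE) / cpu_khz;

            /* As in set_cyc2ns_scale(): nsec per cycle, pre-scaled by 2^NS_SCALE. */
            unsigned long long cyc2ns_scale = (1000000ULL << NS_SCALE) / cpu_khz;

            unsigned long long cycles = 4400000; /* 2 ms worth of TSC cycles */

            /* As in do_gettimeoffset_tsc() / cycles_2_ns(): multiply, shift back. */
            printf("usec: %llu\n", (cycles * tsc_quot) >> US_SCALE);     /* ~2000 */
            printf("nsec: %llu\n", (cycles * cyc2ns_scale) >> NS_SCALE); /* ~2000000 */

            /* As in hpet_init(): HPET clocks per timer tick, rounded to nearest. */
            printf("hpet_tick: %llu\n",
                   (fsec_per_tick + hpet_period / 2) / hpet_period);     /* 40000 */
            return 0;
    }

The two shift counts trade range against resolution: the large US_SCALE keeps the microsecond quotient precise, while the small NS_SCALE keeps cyc * cyc2ns_scale within 64 bits for the large cycle counts that sched_clock() feeds through cycles_2_ns(), which is presumably why the source comments call them "carefully chosen".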