author     john stultz <johnstul@us.ibm.com>      2006-06-26 03:25:11 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-06-26 12:58:21 -0400
commit     6f84fa2f3edc8902cfed02cd510c7c58334bb9bd
tree       afc4229a3ff0aa7e9956759334b7ad1f1b4827d9
parent     539eb11e6e904f2cd4f62908cc5e44d724879721
[PATCH] Time: i386 Conversion - part 3: Enable Generic Timekeeping
This converts the i386 arch to use the generic timeofday subsystem. It
enables the GENERIC_TIME option, disables the timer_opts code and other
arch-specific timekeeping code, and reworks the delay code.
While this patch enables the generic timekeeping, please note that it does
not provide any i386 clocksource; only the jiffies clocksource will be
available. To get full replacements for the code being disabled here, the
timeofday-clocks-i386 patch will be needed.
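
For context, a "clocksource" is the generic timekeeping core's abstraction of a
free-running hardware counter. A rough sketch of what such a follow-up patch
would register for the TSC is shown below; the field values, the read routine,
and the registration call are assumptions based on the 2.6-era clocksource API,
not code from this patch:

	#include <linux/clocksource.h>
	#include <asm/msr.h>	/* rdtscll() */

	/* Hypothetical illustration only - the real TSC clocksource arrives
	 * in the follow-up timeofday-clocks-i386 patch. */
	static cycle_t read_tsc(void)
	{
		cycle_t ret;

		rdtscll(ret);		/* read the 64-bit TSC */
		return ret;
	}

	static struct clocksource clocksource_tsc = {
		.name	= "tsc",
		.rating	= 300,			/* higher rating is preferred */
		.read	= read_tsc,
		.mask	= CLOCKSOURCE_MASK(64),
		.shift	= 22,			/* mult/shift turn cycles into ns */
	};

	/* in arch init code, once cpu_khz is known (assumed usage):
	 *	clocksource_tsc.mult = clocksource_khz2mult(cpu_khz,
	 *						clocksource_tsc.shift);
	 *	clocksource_register(&clocksource_tsc);
	 */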
Signed-off-by: John Stultz <johnstul@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch')
-rw-r--r--  arch/i386/Kconfig         |   4
-rw-r--r--  arch/i386/kernel/Makefile |   1
-rw-r--r--  arch/i386/kernel/time.c   | 153
-rw-r--r--  arch/i386/kernel/tsc.c    |   3
-rw-r--r--  arch/i386/lib/delay.c     |  65
5 files changed, 82 insertions(+), 144 deletions(-)
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 1596101cfaf8..db1f8b406063 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -14,6 +14,10 @@ config X86_32
 	  486, 586, Pentiums, and various instruction-set-compatible chips by
 	  AMD, Cyrix, and others.
 
+config GENERIC_TIME
+	bool
+	default y
+
 config SEMAPHORE_SLEEPERS
 	bool
 	default y
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index ca70d61ea834..f238cb6274eb 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -10,7 +10,6 @@ obj-y := process.o semaphore.o signal.o entry.o traps.o irq.o \
 		quirks.o i8237.o topology.o alternative.o i8253.o tsc.o
 
 obj-y				+= cpu/
-obj-y				+= timers/
 obj-y				+= acpi/
 obj-$(CONFIG_X86_BIOS_REBOOT)	+= reboot.o
 obj-$(CONFIG_MCA)		+= mca.o
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index 796e5faa6ca7..2a6ab86ffc15 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -82,7 +82,8 @@ extern unsigned long wall_jiffies;
 DEFINE_SPINLOCK(rtc_lock);
 EXPORT_SYMBOL(rtc_lock);
 
-struct timer_opts *cur_timer __read_mostly = &timer_none;
+/* XXX - necessary to keep things compiling. to be removed later */
+u32 pmtmr_ioport;
 
 /*
  * This is a special lock that is owned by the CPU and holds the index
@@ -113,99 +114,19 @@ void rtc_cmos_write(unsigned char val, unsigned char addr)
 }
 EXPORT_SYMBOL(rtc_cmos_write);
 
-/*
- * This version of gettimeofday has microsecond resolution
- * and better than microsecond precision on fast x86 machines with TSC.
- */
-void do_gettimeofday(struct timeval *tv)
-{
-	unsigned long seq;
-	unsigned long usec, sec;
-	unsigned long max_ntp_tick;
-
-	do {
-		unsigned long lost;
-
-		seq = read_seqbegin(&xtime_lock);
-
-		usec = cur_timer->get_offset();
-		lost = jiffies - wall_jiffies;
-
-		/*
-		 * If time_adjust is negative then NTP is slowing the clock
-		 * so make sure not to go into next possible interval.
-		 * Better to lose some accuracy than have time go backwards..
-		 */
-		if (unlikely(time_adjust < 0)) {
-			max_ntp_tick = (USEC_PER_SEC / HZ) - tickadj;
-			usec = min(usec, max_ntp_tick);
-
-			if (lost)
-				usec += lost * max_ntp_tick;
-		}
-		else if (unlikely(lost))
-			usec += lost * (USEC_PER_SEC / HZ);
-
-		sec = xtime.tv_sec;
-		usec += (xtime.tv_nsec / 1000);
-	} while (read_seqretry(&xtime_lock, seq));
-
-	while (usec >= 1000000) {
-		usec -= 1000000;
-		sec++;
-	}
-
-	tv->tv_sec = sec;
-	tv->tv_usec = usec;
-}
-
-EXPORT_SYMBOL(do_gettimeofday);
-
-int do_settimeofday(struct timespec *tv)
-{
-	time_t wtm_sec, sec = tv->tv_sec;
-	long wtm_nsec, nsec = tv->tv_nsec;
-
-	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
-		return -EINVAL;
-
-	write_seqlock_irq(&xtime_lock);
-	/*
-	 * This is revolting. We need to set "xtime" correctly. However, the
-	 * value in this location is the value at the most recent update of
-	 * wall time. Discover what correction gettimeofday() would have
-	 * made, and then undo it!
-	 */
-	nsec -= cur_timer->get_offset() * NSEC_PER_USEC;
-	nsec -= (jiffies - wall_jiffies) * TICK_NSEC;
-
-	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
-	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
-
-	set_normalized_timespec(&xtime, sec, nsec);
-	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
-
-	ntp_clear();
-	write_sequnlock_irq(&xtime_lock);
-	clock_was_set();
-	return 0;
-}
-
-EXPORT_SYMBOL(do_settimeofday);
-
 static int set_rtc_mmss(unsigned long nowtime)
 {
 	int retval;
-
-	WARN_ON(irqs_disabled());
+	unsigned long flags;
 
 	/* gets recalled with irq locally disabled */
-	spin_lock_irq(&rtc_lock);
+	/* XXX - does irqsave resolve this? -johnstul */
+	spin_lock_irqsave(&rtc_lock, flags);
 	if (efi_enabled)
 		retval = efi_set_rtc_mmss(nowtime);
 	else
 		retval = mach_set_rtc_mmss(nowtime);
-	spin_unlock_irq(&rtc_lock);
+	spin_unlock_irqrestore(&rtc_lock, flags);
 
 	return retval;
 }
@@ -213,16 +134,6 @@ static int set_rtc_mmss(unsigned long nowtime)
 
 int timer_ack;
 
-/* monotonic_clock(): returns # of nanoseconds passed since time_init()
- * Note: This function is required to return accurate
- * time even in the absence of multiple timer ticks.
- */
-unsigned long long monotonic_clock(void)
-{
-	return cur_timer->monotonic_clock();
-}
-EXPORT_SYMBOL(monotonic_clock);
-
 #if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
 unsigned long profile_pc(struct pt_regs *regs)
 {
@@ -237,11 +148,21 @@ EXPORT_SYMBOL(profile_pc);
 #endif
 
 /*
- * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
+ * This is the same as the above, except we _also_ save the current
+ * Time Stamp Counter value at the time of the timer interrupt, so that
+ * we later on can estimate the time of day more exactly.
 */
-static inline void do_timer_interrupt(int irq, struct pt_regs *regs)
+irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 {
+	/*
+	 * Here we are in the timer irq handler. We just have irqs locally
+	 * disabled but we don't know if the timer_bh is running on the other
+	 * CPU. We need to avoid to SMP race with it. NOTE: we don' t need
+	 * the irq version of write_lock because as just said we have irq
+	 * locally disabled. -arca
+	 */
+	write_seqlock(&xtime_lock);
+
 #ifdef CONFIG_X86_IO_APIC
 	if (timer_ack) {
 		/*
@@ -274,27 +195,6 @@ static inline void do_timer_interrupt(int irq, struct pt_regs *regs)
 		irq = inb_p( 0x61 );	/* read the current state */
 		outb_p( irq|0x80, 0x61 );	/* reset the IRQ */
 	}
-}
-
-/*
- * This is the same as the above, except we _also_ save the current
- * Time Stamp Counter value at the time of the timer interrupt, so that
- * we later on can estimate the time of day more exactly.
- */
-irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
-{
-	/*
-	 * Here we are in the timer irq handler. We just have irqs locally
-	 * disabled but we don't know if the timer_bh is running on the other
-	 * CPU. We need to avoid to SMP race with it. NOTE: we don' t need
-	 * the irq version of write_lock because as just said we have irq
-	 * locally disabled. -arca
-	 */
-	write_seqlock(&xtime_lock);
-
-	cur_timer->mark_offset();
-
-	do_timer_interrupt(irq, regs);
 
 	write_sequnlock(&xtime_lock);
 
@@ -375,7 +275,6 @@ void notify_arch_cmos_timer(void)
 
 static long clock_cmos_diff, sleep_start;
 
-static struct timer_opts *last_timer;
 static int timer_suspend(struct sys_device *dev, pm_message_t state)
 {
 	/*
@@ -384,10 +283,6 @@ static int timer_suspend(struct sys_device *dev, pm_message_t state)
 	clock_cmos_diff = -get_cmos_time();
 	clock_cmos_diff += get_seconds();
 	sleep_start = get_cmos_time();
-	last_timer = cur_timer;
-	cur_timer = &timer_none;
-	if (last_timer->suspend)
-		last_timer->suspend(state);
 	return 0;
 }
 
@@ -410,10 +305,6 @@ static int timer_resume(struct sys_device *dev)
 	jiffies_64 += sleep_length;
 	wall_jiffies += sleep_length;
 	write_sequnlock_irqrestore(&xtime_lock, flags);
-	if (last_timer->resume)
-		last_timer->resume();
-	cur_timer = last_timer;
-	last_timer = NULL;
 	touch_softlockup_watchdog();
 	return 0;
 }
@@ -455,9 +346,6 @@ static void __init hpet_time_init(void)
 		printk("Using HPET for base-timer\n");
 	}
 
-	cur_timer = select_timer();
-	printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name);
-
 	time_init_hook();
 }
 #endif
@@ -479,8 +367,5 @@ void __init time_init(void)
 	set_normalized_timespec(&wall_to_monotonic,
 		-xtime.tv_sec, -xtime.tv_nsec);
 
-	cur_timer = select_timer();
-	printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name);
-
 	time_init_hook();
 }
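
A brief aside on the set_rtc_mmss() change above: spin_lock_irqsave() records
the caller's interrupt state and spin_unlock_irqrestore() puts it back, so the
function is now correct whether or not its caller already has interrupts
disabled, which is what the removed WARN_ON() and the new XXX comment are
about. A minimal sketch of the pattern, using hypothetical names rather than
code from this patch:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock */

	static void touch_shared_state(void)
	{
		unsigned long flags;

		/* save the current IRQ state, disable IRQs, take the lock */
		spin_lock_irqsave(&example_lock, flags);
		/* ... critical section ... */
		spin_unlock_irqrestore(&example_lock, flags);
		/* IRQs are back to whatever state the caller had */
	}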
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
index 3b64eaafce2b..96b307495e5f 100644
--- a/arch/i386/kernel/tsc.c
+++ b/arch/i386/kernel/tsc.c
@@ -10,6 +10,7 @@
 #include <linux/init.h>
 
 #include <asm/tsc.h>
+#include <asm/delay.h>
 #include <asm/io.h>
 
 #include "mach_timer.h"
@@ -45,7 +46,6 @@ static int __init tsc_setup(char *str)
 
 __setup("notsc", tsc_setup);
 
-
 /*
  * code to mark and check if the TSC is unstable
  * due to cpufreq or due to unsynced TSCs
@@ -205,6 +205,7 @@ void tsc_init(void)
 	       (unsigned long)cpu_khz % 1000);
 
 	set_cyc2ns_scale(cpu_khz);
+	use_tsc_delay();
 }
 
 #ifdef CONFIG_CPU_FREQ
diff --git a/arch/i386/lib/delay.c b/arch/i386/lib/delay.c
index c49a6acbee56..3c0714c4b669 100644
--- a/arch/i386/lib/delay.c
+++ b/arch/i386/lib/delay.c
@@ -10,43 +10,92 @@
  * we have to worry about.
  */
 
+#include <linux/module.h>
 #include <linux/config.h>
 #include <linux/sched.h>
 #include <linux/delay.h>
-#include <linux/module.h>
+
 #include <asm/processor.h>
 #include <asm/delay.h>
 #include <asm/timer.h>
 
 #ifdef CONFIG_SMP
-#include <asm/smp.h>
+# include <asm/smp.h>
 #endif
 
-extern struct timer_opts* timer;
+/* simple loop based delay: */
+static void delay_loop(unsigned long loops)
+{
+	int d0;
+
+	__asm__ __volatile__(
+		"\tjmp 1f\n"
+		".align 16\n"
+		"1:\tjmp 2f\n"
+		".align 16\n"
+		"2:\tdecl %0\n\tjns 2b"
+		:"=&a" (d0)
+		:"0" (loops));
+}
+
+/* TSC based delay: */
+static void delay_tsc(unsigned long loops)
+{
+	unsigned long bclock, now;
+
+	rdtscl(bclock);
+	do {
+		rep_nop();
+		rdtscl(now);
+	} while ((now-bclock) < loops);
+}
+
+/*
+ * Since we calibrate only once at boot, this
+ * function should be set once at boot and not changed
+ */
+static void (*delay_fn)(unsigned long) = delay_loop;
+
+void use_tsc_delay(void)
+{
+	delay_fn = delay_tsc;
+}
+
+int read_current_timer(unsigned long *timer_val)
+{
+	if (delay_fn == delay_tsc) {
+		rdtscl(*timer_val);
+		return 0;
+	}
+	return -1;
+}
 
 void __delay(unsigned long loops)
 {
-	cur_timer->delay(loops);
+	delay_fn(loops);
 }
 
 inline void __const_udelay(unsigned long xloops)
 {
 	int d0;
+
 	xloops *= 4;
 	__asm__("mull %0"
 		:"=d" (xloops), "=&a" (d0)
-		:"1" (xloops),"0" (cpu_data[raw_smp_processor_id()].loops_per_jiffy * (HZ/4)));
-	__delay(++xloops);
+		:"1" (xloops), "0"
+		(cpu_data[raw_smp_processor_id()].loops_per_jiffy * (HZ/4)));
+
+	__delay(++xloops);
 }
 
 void __udelay(unsigned long usecs)
 {
 	__const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
 }
 
 void __ndelay(unsigned long nsecs)
 {
 	__const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
 }
 
 EXPORT_SYMBOL(__delay);
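
A closing note on the delay rework above: use_tsc_delay(), called from
tsc_init() in the tsc.c hunk, simply flips delay_fn from the calibrated
software loop to the TSC busy-wait. The magic numbers in __udelay()/__ndelay()
are fixed-point scale factors, 0x10c7 being 2**32/10**6 rounded up and 0x5
being 2**32/10**9 rounded up, so the mull in __const_udelay() effectively
multiplies by loops_per_jiffy * HZ and divides by 10**6 (or 10**9) in one
step. A tiny userspace check of the two constants (an illustration, not
kernel code):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* 2**32 / 10**6, rounded up: the scale factor in __udelay() */
		uint64_t us_scale = (UINT64_C(1) << 32) / 1000000 + 1;
		/* 2**32 / 10**9, rounded up: the scale factor in __ndelay() */
		uint64_t ns_scale = (UINT64_C(1) << 32) / 1000000000 + 1;

		/* prints "0x10c7 0x5" */
		printf("0x%llx 0x%llx\n",
		       (unsigned long long)us_scale,
		       (unsigned long long)ns_scale);
		return 0;
	}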