Diffstat (limited to 'arch/x86/kernel')
 arch/x86/kernel/i8253.c       | 19 -------------------
 arch/x86/kernel/rtc.c         |  5 +++--
 arch/x86/kernel/tsc.c         | 14 +++++++++++---
 arch/x86/kernel/vsyscall_64.c |  1 +
 4 files changed, 15 insertions(+), 24 deletions(-)
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index 5cf36c053ac4..23c167925a5c 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -19,12 +19,6 @@
 DEFINE_SPINLOCK(i8253_lock);
 EXPORT_SYMBOL(i8253_lock);
 
-#ifdef CONFIG_X86_32
-static void pit_disable_clocksource(void);
-#else
-static inline void pit_disable_clocksource(void) { }
-#endif
-
 /*
  * HPET replaces the PIT, when enabled. So we need to know, which of
  * the two timers is used
@@ -57,12 +51,10 @@ static void init_pit_timer(enum clock_event_mode mode,
 			outb_pit(0, PIT_CH0);
 			outb_pit(0, PIT_CH0);
 		}
-		pit_disable_clocksource();
 		break;
 
 	case CLOCK_EVT_MODE_ONESHOT:
 		/* One shot setup */
-		pit_disable_clocksource();
 		outb_pit(0x38, PIT_MODE);
 		break;
 
@@ -200,17 +192,6 @@ static struct clocksource pit_cs = {
 	.shift		= 20,
 };
 
-static void pit_disable_clocksource(void)
-{
-	/*
-	 * Use mult to check whether it is registered or not
-	 */
-	if (pit_cs.mult) {
-		clocksource_unregister(&pit_cs);
-		pit_cs.mult = 0;
-	}
-}
-
 static int __init init_pit_clocksource(void)
 {
 	/*
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index 5d465b207e72..bf67dcb4a44c 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -178,7 +178,7 @@ static int set_rtc_mmss(unsigned long nowtime)
 }
 
 /* not static: needed by APM */
-unsigned long read_persistent_clock(void)
+void read_persistent_clock(struct timespec *ts)
 {
 	unsigned long retval, flags;
 
@@ -186,7 +186,8 @@ unsigned long read_persistent_clock(void)
 	retval = get_wallclock();
 	spin_unlock_irqrestore(&rtc_lock, flags);
 
-	return retval;
+	ts->tv_sec = retval;
+	ts->tv_nsec = 0;
 }
 
 int update_persistent_clock(struct timespec now)
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 71f4368b357e..fc3672a303d6 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -744,10 +744,16 @@ static cycle_t __vsyscall_fn vread_tsc(void)
 }
 #endif
 
+static void resume_tsc(void)
+{
+	clocksource_tsc.cycle_last = 0;
+}
+
 static struct clocksource clocksource_tsc = {
 	.name			= "tsc",
 	.rating			= 300,
 	.read			= read_tsc,
+	.resume			= resume_tsc,
 	.mask			= CLOCKSOURCE_MASK(64),
 	.shift			= 22,
 	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
@@ -761,12 +767,14 @@ void mark_tsc_unstable(char *reason)
 {
 	if (!tsc_unstable) {
 		tsc_unstable = 1;
-		printk("Marking TSC unstable due to %s\n", reason);
+		printk(KERN_INFO "Marking TSC unstable due to %s\n", reason);
 		/* Change only the rating, when not registered */
 		if (clocksource_tsc.mult)
-			clocksource_change_rating(&clocksource_tsc, 0);
-		else
+			clocksource_mark_unstable(&clocksource_tsc);
+		else {
+			clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
 			clocksource_tsc.rating = 0;
+		}
 	}
 }
 
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 25ee06a80aad..cf53a78e2dcf 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -87,6 +87,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
 	vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
 	vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
 	vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic;
+	vsyscall_gtod_data.wall_time_coarse = __current_kernel_time();
 	write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
 }
 