-rw-r--r--  arch/i386/Kconfig            |   4
-rw-r--r--  arch/i386/kernel/tsc.c       |  49
-rw-r--r--  include/linux/clocksource.h  |  15
-rw-r--r--  kernel/time/clocksource.c    | 126
-rw-r--r--  kernel/timer.c               |  45
5 files changed, 161 insertions(+), 78 deletions(-)
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 4ea31c327d1f..458b3aad3eb3 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -18,6 +18,10 @@ config GENERIC_TIME
 	bool
 	default y
 
+config CLOCKSOURCE_WATCHDOG
+	bool
+	default y
+
 config LOCKDEP_SUPPORT
 	bool
 	default y
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
index b4b2be21d1c7..22931d24027c 100644
--- a/arch/i386/kernel/tsc.c
+++ b/arch/i386/kernel/tsc.c
@@ -344,49 +344,6 @@ static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
 	{}
 };
 
-#define TSC_FREQ_CHECK_INTERVAL (10*MSEC_PER_SEC) /* 10sec in MS */
-static struct timer_list verify_tsc_freq_timer;
-
-/* XXX - Probably should add locking */
-static void verify_tsc_freq(unsigned long unused)
-{
-	static u64 last_tsc;
-	static unsigned long last_jiffies;
-
-	u64 now_tsc, interval_tsc;
-	unsigned long now_jiffies, interval_jiffies;
-
-
-	if (check_tsc_unstable())
-		return;
-
-	rdtscll(now_tsc);
-	now_jiffies = jiffies;
-
-	if (!last_jiffies) {
-		goto out;
-	}
-
-	interval_jiffies = now_jiffies - last_jiffies;
-	interval_tsc = now_tsc - last_tsc;
-	interval_tsc *= HZ;
-	do_div(interval_tsc, cpu_khz*1000);
-
-	if (interval_tsc < (interval_jiffies * 3 / 4)) {
-		printk("TSC appears to be running slowly. "
-			"Marking it as unstable\n");
-		mark_tsc_unstable();
-		return;
-	}
-
-out:
-	last_tsc = now_tsc;
-	last_jiffies = now_jiffies;
-	/* set us up to go off on the next interval: */
-	mod_timer(&verify_tsc_freq_timer,
-		jiffies + msecs_to_jiffies(TSC_FREQ_CHECK_INTERVAL));
-}
-
 /*
  * Make an educated guess if the TSC is trustworthy and synchronized
  * over all CPUs.
@@ -424,12 +381,6 @@ static int __init init_tsc_clocksource(void)
 		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
 	}
 
-	init_timer(&verify_tsc_freq_timer);
-	verify_tsc_freq_timer.function = verify_tsc_freq;
-	verify_tsc_freq_timer.expires =
-		jiffies + msecs_to_jiffies(TSC_FREQ_CHECK_INTERVAL);
-	add_timer(&verify_tsc_freq_timer);
-
 	return clocksource_register(&clocksource_tsc);
 }
 
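Aside (not part of the patch): the deleted verify_tsc_freq() was an i386-only sanity check. Every 10 seconds it converted the elapsed TSC cycles into jiffies and marked the TSC unstable if it had advanced less than 3/4 of the jiffies that actually passed; the generic watchdog added below in kernel/time/clocksource.c takes over that job for any clocksource that sets CLOCK_SOURCE_MUST_VERIFY. A minimal standalone sketch of that rate check, with invented HZ, cpu_khz and cycle counts:

/* Illustrative sketch only, not kernel code: the rate check the removed
 * verify_tsc_freq() performed.  Elapsed TSC cycles are scaled into jiffies
 * and the TSC is flagged if it covered less than 3/4 of the jiffies that
 * really elapsed.  All numbers below are made up. */
#include <stdint.h>
#include <stdio.h>

#define HZ      250             /* assumed tick rate */
#define CPU_KHZ 2000000ULL      /* assumed 2 GHz TSC */

int main(void)
{
        uint64_t interval_tsc = 14000000000ULL; /* ~7 s worth of cycles seen in a 10 s window */
        unsigned long interval_jiffies = 2500;  /* 10 s at HZ=250 */

        /* cycles -> jiffies: cycles * HZ / (cpu_khz * 1000) */
        uint64_t tsc_jiffies = interval_tsc * HZ / (CPU_KHZ * 1000);

        if (tsc_jiffies < interval_jiffies * 3 / 4)
                printf("TSC appears to be running slowly (%llu < %lu jiffies)\n",
                       (unsigned long long)tsc_jiffies, interval_jiffies * 3 / 4);
        else
                printf("TSC rate looks OK\n");
        return 0;
}

With these sample values the TSC accounts for only 1750 of the 2500 elapsed jiffies, below the 1875-jiffy (3/4) cutoff, so it would have been flagged.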
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index a585a29fe7c4..830a250ecf94 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -12,11 +12,13 @@
 #include <linux/timex.h>
 #include <linux/time.h>
 #include <linux/list.h>
+#include <linux/timer.h>
 #include <asm/div64.h>
 #include <asm/io.h>
 
 /* clocksource cycle base type */
 typedef u64 cycle_t;
+struct clocksource;
 
 /**
  * struct clocksource - hardware abstraction for a free running counter
@@ -62,13 +64,22 @@ struct clocksource {
 	cycle_t cycle_last, cycle_interval;
 	u64 xtime_nsec, xtime_interval;
 	s64 error;
+
+#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
+	/* Watchdog related data, used by the framework */
+	struct list_head wd_list;
+	cycle_t wd_last;
+#endif
 };
 
 /*
  * Clock source flags bits::
  */
 #define CLOCK_SOURCE_IS_CONTINUOUS	0x01
 #define CLOCK_SOURCE_MUST_VERIFY	0x02
+
+#define CLOCK_SOURCE_WATCHDOG		0x10
+#define CLOCK_SOURCE_VALID_FOR_HRES	0x20
 
 /* simplify initialization of mask field */
 #define CLOCKSOURCE_MASK(bits) (cycle_t)(bits<64 ? ((1ULL<<bits)-1) : -1)
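Aside (not part of the patch): the new flags separate what the hardware claims (CLOCK_SOURCE_IS_CONTINUOUS) from what the watchdog has verified (CLOCK_SOURCE_VALID_FOR_HRES, plus the internal CLOCK_SOURCE_WATCHDOG bookkeeping bit). A clocksource that wants to be cross-checked sets CLOCK_SOURCE_MUST_VERIFY; a hypothetical driver-side declaration might look like the kernel-side sketch below, where all names and values are placeholders rather than anything from this patch:

/* Hypothetical usage sketch: a TSC-like clocksource that is continuous in
 * hardware but asks the generic watchdog to verify it before it is trusted
 * for high-resolution timers. */
#include <linux/clocksource.h>

static cycle_t example_read(void)
{
	return 0;	/* would read the hardware counter here */
}

static struct clocksource clocksource_example = {
	.name	= "example",
	.rating	= 300,
	.read	= example_read,
	.mask	= CLOCKSOURCE_MASK(64),
	.shift	= 22,	/* .mult would be filled in from the measured frequency at init */
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_MUST_VERIFY,
};

clocksource_register() then hands the structure to clocksource_check_watchdog(), which either queues it on the watchdog list (MUST_VERIFY set) or marks it valid for high-resolution use straight away.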
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 2f6a3d6e43bc..3cb8ac978270 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -63,9 +63,116 @@ static int __init clocksource_done_booting(void)
 	finished_booting = 1;
 	return 0;
 }
-
 late_initcall(clocksource_done_booting);
 
+#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
+static LIST_HEAD(watchdog_list);
+static struct clocksource *watchdog;
+static struct timer_list watchdog_timer;
+static DEFINE_SPINLOCK(watchdog_lock);
+static cycle_t watchdog_last;
+/*
+ * Interval: 0.5sec Treshold: 0.0625s
+ */
+#define WATCHDOG_INTERVAL (HZ >> 1)
+#define WATCHDOG_TRESHOLD (NSEC_PER_SEC >> 4)
+
+static void clocksource_ratewd(struct clocksource *cs, int64_t delta)
+{
+	if (delta > -WATCHDOG_TRESHOLD && delta < WATCHDOG_TRESHOLD)
+		return;
+
+	printk(KERN_WARNING "Clocksource %s unstable (delta = %Ld ns)\n",
+	       cs->name, delta);
+	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
+	clocksource_change_rating(cs, 0);
+	cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
+	list_del(&cs->wd_list);
+}
+
+static void clocksource_watchdog(unsigned long data)
+{
+	struct clocksource *cs, *tmp;
+	cycle_t csnow, wdnow;
+	int64_t wd_nsec, cs_nsec;
+
+	spin_lock(&watchdog_lock);
+
+	wdnow = watchdog->read();
+	wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask);
+	watchdog_last = wdnow;
+
+	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
+		csnow = cs->read();
+		/* Initialized ? */
+		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
+			if ((cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
+			    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
+				cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
+			}
+			cs->flags |= CLOCK_SOURCE_WATCHDOG;
+			cs->wd_last = csnow;
+		} else {
+			cs_nsec = cyc2ns(cs, (csnow - cs->wd_last) & cs->mask);
+			cs->wd_last = csnow;
+			/* Check the delta. Might remove from the list ! */
+			clocksource_ratewd(cs, cs_nsec - wd_nsec);
+		}
+	}
+
+	if (!list_empty(&watchdog_list)) {
+		__mod_timer(&watchdog_timer,
+			    watchdog_timer.expires + WATCHDOG_INTERVAL);
+	}
+	spin_unlock(&watchdog_lock);
+}
+static void clocksource_check_watchdog(struct clocksource *cs)
+{
+	struct clocksource *cse;
+	unsigned long flags;
+
+	spin_lock_irqsave(&watchdog_lock, flags);
+	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
+		int started = !list_empty(&watchdog_list);
+
+		list_add(&cs->wd_list, &watchdog_list);
+		if (!started && watchdog) {
+			watchdog_last = watchdog->read();
+			watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
+			add_timer(&watchdog_timer);
+		}
+	} else if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) {
+		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
+
+		if (!watchdog || cs->rating > watchdog->rating) {
+			if (watchdog)
+				del_timer(&watchdog_timer);
+			watchdog = cs;
+			init_timer(&watchdog_timer);
+			watchdog_timer.function = clocksource_watchdog;
+
+			/* Reset watchdog cycles */
+			list_for_each_entry(cse, &watchdog_list, wd_list)
+				cse->flags &= ~CLOCK_SOURCE_WATCHDOG;
+			/* Start if list is not empty */
+			if (!list_empty(&watchdog_list)) {
+				watchdog_last = watchdog->read();
+				watchdog_timer.expires =
+					jiffies + WATCHDOG_INTERVAL;
+				add_timer(&watchdog_timer);
+			}
+		}
+	}
+	spin_unlock_irqrestore(&watchdog_lock, flags);
+}
+#else
+static void clocksource_check_watchdog(struct clocksource *cs)
+{
+	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
+		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
+}
+#endif
+
 /**
  * clocksource_get_next - Returns the selected clocksource
  *
@@ -94,13 +201,21 @@ struct clocksource *clocksource_get_next(void)
  */
 static struct clocksource *select_clocksource(void)
 {
+	struct clocksource *next;
+
 	if (list_empty(&clocksource_list))
 		return NULL;
 
 	if (clocksource_override)
-		return clocksource_override;
+		next = clocksource_override;
+	else
+		next = list_entry(clocksource_list.next, struct clocksource,
+				  list);
 
-	return list_entry(clocksource_list.next, struct clocksource, list);
+	if (next == curr_clocksource)
+		return NULL;
+
+	return next;
 }
 
 /*
@@ -138,13 +253,15 @@ static int clocksource_enqueue(struct clocksource *c)
 int clocksource_register(struct clocksource *c)
 {
 	unsigned long flags;
-	int ret = 0;
+	int ret;
 
 	spin_lock_irqsave(&clocksource_lock, flags);
 	ret = clocksource_enqueue(c);
 	if (!ret)
 		next_clocksource = select_clocksource();
 	spin_unlock_irqrestore(&clocksource_lock, flags);
+	if (!ret)
+		clocksource_check_watchdog(c);
 	return ret;
 }
 EXPORT_SYMBOL(clocksource_register);
@@ -159,6 +276,7 @@ void clocksource_change_rating(struct clocksource *cs, int rating)
 
 	spin_lock_irqsave(&clocksource_lock, flags);
 	list_del(&cs->list);
+	cs->rating = rating;
 	clocksource_enqueue(cs);
 	next_clocksource = select_clocksource();
 	spin_unlock_irqrestore(&clocksource_lock, flags);
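Aside (not part of the patch): the watchdog arithmetic is a plain interval comparison. Every WATCHDOG_INTERVAL (0.5 s) the watchdog clocksource and each monitored clocksource are read, each delta is converted to nanoseconds with cyc2ns() (essentially (cycles * mult) >> shift), and if the two intervals disagree by more than NSEC_PER_SEC >> 4 (62.5 ms, the patch's WATCHDOG_TRESHOLD) the clocksource is downgraded to rating 0 and dropped from the list. A standalone sketch of that comparison, with invented mult/shift values and cycle counts:

/* Standalone sketch, not kernel code: one watchdog comparison step. */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC       1000000000LL
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)   /* 62.5 ms */

/* cyc2ns() in the kernel is essentially (cycles * mult) >> shift */
static int64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
        return (int64_t)((cycles * mult) >> shift);
}

int main(void)
{
        /* ~0.5 s worth of cycles on two counters; mult=2^22, shift=22 gives 1 ns per cycle */
        int64_t wd_nsec = cyc2ns(500000000ULL, 4194304, 22);  /* watchdog: 500 ms */
        int64_t cs_nsec = cyc2ns(430000000ULL, 4194304, 22);  /* monitored: 430 ms, ~14% slow */
        int64_t delta = cs_nsec - wd_nsec;

        if (delta <= -WATCHDOG_THRESHOLD || delta >= WATCHDOG_THRESHOLD)
                printf("unstable (delta = %lld ns)\n", (long long)delta);
        else
                printf("within threshold (delta = %lld ns)\n", (long long)delta);
        return 0;
}

Here the monitored counter covered only 430 ms of the 500 ms interval, a 70 ms discrepancy, which exceeds the 62.5 ms threshold and would trigger clocksource_ratewd().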
diff --git a/kernel/timer.c b/kernel/timer.c
index 4b088fcadb3f..b68a21a82e17 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -832,30 +832,33 @@ EXPORT_SYMBOL(do_settimeofday);
  *
  * Accumulates current time interval and initializes new clocksource
  */
-static int change_clocksource(void)
+static void change_clocksource(void)
 {
 	struct clocksource *new;
 	cycle_t now;
 	u64 nsec;
+
 	new = clocksource_get_next();
-	if (clock != new) {
-		now = clocksource_read(new);
-		nsec = __get_nsec_offset();
-		timespec_add_ns(&xtime, nsec);
-
-		clock = new;
-		clock->cycle_last = now;
-		printk(KERN_INFO "Time: %s clocksource has been installed.\n",
-		       clock->name);
-		return 1;
-	}
-	return 0;
+
+	if (clock == new)
+		return;
+
+	now = clocksource_read(new);
+	nsec = __get_nsec_offset();
+	timespec_add_ns(&xtime, nsec);
+
+	clock = new;
+	clock->cycle_last = now;
+
+	clock->error = 0;
+	clock->xtime_nsec = 0;
+	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
+
+	printk(KERN_INFO "Time: %s clocksource has been installed.\n",
+	       clock->name);
 }
 #else
-static inline int change_clocksource(void)
-{
-	return 0;
-}
+static inline void change_clocksource(void) { }
 #endif
 
 /**
@@ -869,7 +872,7 @@ int timekeeping_is_continuous(void)
 	do {
 		seq = read_seqbegin(&xtime_lock);
 
-		ret = clock->flags & CLOCK_SOURCE_IS_CONTINUOUS;
+		ret = clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
 
 	} while (read_seqretry(&xtime_lock, seq));
 
@@ -1124,11 +1127,7 @@ static void update_wall_time(void)
 	clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;
 
 	/* check to see if there is a new clocksource to use */
-	if (change_clocksource()) {
-		clock->error = 0;
-		clock->xtime_nsec = 0;
-		clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
-	}
+	change_clocksource();
 }
 
 /*
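Aside (not part of the patch): change_clocksource() now owns the whole handover. It folds the old clocksource's not-yet-accumulated nanoseconds into xtime via __get_nsec_offset()/timespec_add_ns(), rebases cycle_last on the new counter, and resets error, xtime_nsec and the accumulation interval itself, so update_wall_time() only has to call it. A userspace sketch of that bookkeeping, with invented counter values and a simplified (cycles * mult) >> shift conversion:

/* Standalone sketch, not kernel code: switching counters without losing time. */
#include <stdint.h>
#include <stdio.h>

struct counter {
        uint64_t now;           /* current raw reading */
        uint64_t cycle_last;    /* reading at the last accumulation point */
        uint32_t mult, shift;   /* cycles -> ns: ns = (cycles * mult) >> shift */
};

static uint64_t pending_ns(const struct counter *c)
{
        return ((c->now - c->cycle_last) * c->mult) >> c->shift;
}

int main(void)
{
        uint64_t xtime_ns = 1000000000ULL;                       /* accumulated wall time */
        struct counter old = { 5000700, 5000000, 4194304, 22 };  /* 1 ns/cycle, 700 ns pending */
        struct counter new = { 123456789, 0, 4194304, 22 };      /* unrelated raw value */

        xtime_ns += pending_ns(&old);   /* flush what the old counter still owes */
        new.cycle_last = new.now;       /* start accounting the new counter from "now" */

        printf("wall time carried over: %llu ns\n", (unsigned long long)xtime_ns);
        return 0;
}

Flushing the old counter's pending nanoseconds before re-basing cycle_last on the new counter is what keeps wall time from jumping or double-counting an interval at the switch.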