 arch/i386/kernel/tsc.c |    9 ++++++++-
 include/linux/sched.h  |    2 ++
 kernel/sched.c         |    7 +++++++
 3 files changed, 17 insertions(+), 1 deletion(-)
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
index f64b81f3033b..ea63a30ca3e8 100644
--- a/arch/i386/kernel/tsc.c
+++ b/arch/i386/kernel/tsc.c
@@ -4,6 +4,7 @@
  * See comments there for proper credits.
  */
 
+#include <linux/sched.h>
 #include <linux/clocksource.h>
 #include <linux/workqueue.h>
 #include <linux/cpufreq.h>
@@ -106,8 +107,13 @@ unsigned long long sched_clock(void)
 
	/*
	 * Fall back to jiffies if there's no TSC available:
+	 * ( But note that we still use it if the TSC is marked
+	 *   unstable. We do this because unlike Time Of Day,
+	 *   the scheduler clock tolerates small errors and it's
+	 *   very important for it to be as fast as the platform
+	 *   can achive it. )
	 */
-	if (unlikely(!tsc_enabled))
+	if (unlikely(!tsc_enabled && !tsc_unstable))
		/* No locking but a rare wrong value is not a big deal: */
		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
 
@@ -277,6 +283,7 @@ static struct clocksource clocksource_tsc = {
 
 void mark_tsc_unstable(char *reason)
 {
+	sched_clock_unstable_event();
	if (!tsc_unstable) {
		tsc_unstable = 1;
		tsc_enabled = 0;
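
Note on the sched_clock() hunk above: after this change the jiffies fallback is taken only when the TSC is neither enabled nor merely marked unstable, so an unstable-but-present TSC keeps driving the scheduler clock. As a minimal stand-alone sketch of the fallback arithmetic (plain user-space C, not kernel code; the HZ value and jiffy count below are made up for illustration), each jiffy is worth 1000000000 / HZ nanoseconds:

/*
 * Stand-alone illustration only; HZ and jiffies_64 are example values
 * and INITIAL_JIFFIES is simplified to 0.
 */
#include <stdio.h>

#define HZ 250
#define INITIAL_JIFFIES 0ULL

static unsigned long long jiffies_64 = 12345;	/* example tick count */

/* same formula as the jiffies fallback path in sched_clock() above */
static unsigned long long fallback_sched_clock(void)
{
	return (jiffies_64 - INITIAL_JIFFIES) * (1000000000ULL / HZ);
}

int main(void)
{
	printf("%llu ns\n", fallback_sched_clock());	/* 12345 * 4000000 ns */
	return 0;
}

At HZ=250 that is 4 ms per tick, which is why the new comment stresses that the fast TSC path is preferred for the scheduler clock even when it is only approximately correct.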
diff --git a/include/linux/sched.h b/include/linux/sched.h
index be2460e6f55b..fa895b309da0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1321,6 +1321,8 @@ extern void sched_exec(void);
 #define sched_exec() {}
 #endif
 
+extern void sched_clock_unstable_event(void);
+
 #ifdef CONFIG_HOTPLUG_CPU
 extern void idle_task_exit(void);
 #else
diff --git a/kernel/sched.c b/kernel/sched.c
index 01ba4b1848a0..6150cd70f448 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -68,6 +68,13 @@ unsigned long long __attribute__((weak)) sched_clock(void)
 }
 
 /*
+ * CPU frequency is/was unstable - start new by setting prev_clock_raw:
+ */
+void sched_clock_unstable_event(void)
+{
+}
+
+/*
  * Convert user-nice values [ -20 ... 0 ... 19 ]
  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
  * and back.
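
The kernel/sched.c hunk adds only an empty hook; its comment hints that it will eventually re-baseline the scheduler clock by resetting prev_clock_raw, a field this patch does not introduce. Purely as an illustration of that re-baselining idea, here is a stand-alone user-space model under assumed names (read_raw_clock, clock_offset and the model_* functions are hypothetical), not the eventual kernel implementation:

/*
 * Illustration only: models the "fold in the elapsed delta, then start a
 * new baseline" idea hinted at by the prev_clock_raw comment above.
 */
#include <stdio.h>

static unsigned long long prev_clock_raw;	/* last raw reading used as baseline */
static unsigned long long clock_offset;	/* time accumulated before that baseline */

/* stand-in for a raw cycle-counter read; ticks forward by 100 per call */
static unsigned long long read_raw_clock(void)
{
	static unsigned long long fake = 1000;
	return fake += 100;
}

/* model of sched_clock(): accumulated offset plus delta since the baseline */
static unsigned long long model_sched_clock(void)
{
	return clock_offset + (read_raw_clock() - prev_clock_raw);
}

/* model of sched_clock_unstable_event(): fold in the delta, take a new baseline */
static void model_sched_clock_unstable_event(void)
{
	unsigned long long now_raw = read_raw_clock();

	clock_offset += now_raw - prev_clock_raw;
	prev_clock_raw = now_raw;
}

int main(void)
{
	prev_clock_raw = read_raw_clock();	/* initial baseline */
	printf("%llu\n", model_sched_clock());
	model_sched_clock_unstable_event();
	printf("%llu\n", model_sched_clock());
	return 0;
}

Folding the delta into an offset before taking the new baseline means readers of the model clock never see time jump backwards across the unstable event, which matches the tolerance for small (but not gross) errors described in the tsc.c comment.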