Diffstat (limited to 'arch/x86_64/kernel/tsc.c')
 arch/x86_64/kernel/tsc.c | 103 ++++++++++++++++++++++++++++------------------------
 1 file changed, 55 insertions(+), 48 deletions(-)
diff --git a/arch/x86_64/kernel/tsc.c b/arch/x86_64/kernel/tsc.c
index 2dbac15ab1f0..8c92f2fe7e2e 100644
--- a/arch/x86_64/kernel/tsc.c
+++ b/arch/x86_64/kernel/tsc.c
@@ -9,32 +9,11 @@
 
 #include <asm/timex.h>
 
-int notsc __initdata = 0;
+static int notsc __initdata = 0;
 
 unsigned int cpu_khz;		/* TSC clocks / usec, not used here */
 EXPORT_SYMBOL(cpu_khz);
 
-/*
- * do_gettimeoffset() returns microseconds since last timer interrupt was
- * triggered by hardware. A memory read of HPET is slower than a register read
- * of TSC, but much more reliable. It's also synchronized to the timer
- * interrupt. Note that do_gettimeoffset() may return more than hpet_tick, if a
- * timer interrupt has happened already, but vxtime.trigger wasn't updated yet.
- * This is not a problem, because jiffies hasn't updated either. They are bound
- * together by xtime_lock.
- */
-
-unsigned int do_gettimeoffset_tsc(void)
-{
-	unsigned long t;
-	unsigned long x;
-	t = get_cycles_sync();
-	if (t < vxtime.last_tsc)
-		t = vxtime.last_tsc; /* hack */
-	x = ((t - vxtime.last_tsc) * vxtime.tsc_quot) >> US_SCALE;
-	return x;
-}
-
 static unsigned int cyc2ns_scale __read_mostly;
 
 void set_cyc2ns_scale(unsigned long khz)
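The helper removed above converted a TSC delta to microseconds with fixed-point arithmetic: vxtime.tsc_quot caches (USEC_PER_MSEC << US_SCALE) / cpu_khz, so each gettimeofday() needs only a multiply and a shift, no division (the /* hack */ clamp merely kept the delta from going negative when the TSC appeared to step backwards). A minimal userspace sketch of that arithmetic, assuming US_SCALE is 32 as in the asm-x86_64/timex.h of this era and a hypothetical 2 GHz CPU; it is not the kernel's code:

/* Sketch only: mirrors the removed do_gettimeoffset_tsc() math.
 * Needs a 64-bit long, as on x86_64. */
#include <stdio.h>

#define US_SCALE	32	/* assumed, as in asm-x86_64/timex.h */
#define USEC_PER_MSEC	1000UL

int main(void)
{
	unsigned long cpu_khz = 2000000;	/* hypothetical 2 GHz CPU */
	unsigned long tsc_quot = (USEC_PER_MSEC << US_SCALE) / cpu_khz;
	unsigned long delta = 5000;		/* cycles since the last tick */

	/* 5000 cycles at 2 GHz is 2.5 us; the truncating
	 * multiply-and-shift yields 2. */
	printf("%lu us\n", (delta * tsc_quot) >> US_SCALE);
	return 0;
}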
@@ -42,7 +21,7 @@ void set_cyc2ns_scale(unsigned long khz)
 	cyc2ns_scale = (NSEC_PER_MSEC << NS_SCALE) / khz;
 }
 
-unsigned long long cycles_2_ns(unsigned long long cyc)
+static unsigned long long cycles_2_ns(unsigned long long cyc)
 {
 	return (cyc * cyc2ns_scale) >> NS_SCALE;
 }
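cycles_2_ns(), which survives the patch, uses the same multiply-and-shift trick with a 10-bit scale: set_cyc2ns_scale() caches (NSEC_PER_MSEC << NS_SCALE) / khz once per frequency change. A small standalone sketch, assuming NS_SCALE is 10 as in this era's asm-x86_64/timex.h:

/* Sketch only: the cycles_2_ns() fixed-point conversion at 2 GHz. */
#include <stdio.h>

#define NS_SCALE	10	/* assumed, as in asm-x86_64/timex.h */
#define NSEC_PER_MSEC	1000000UL

static unsigned int cyc2ns_scale;

static void set_cyc2ns_scale(unsigned long khz)
{
	cyc2ns_scale = (NSEC_PER_MSEC << NS_SCALE) / khz;
}

int main(void)
{
	set_cyc2ns_scale(2000000);	/* hypothetical 2 GHz CPU: scale = 512 */
	/* (1000 * 512) >> 10 = 500: each 2 GHz cycle is half a nanosecond */
	printf("%llu ns\n", (1000ULL * cyc2ns_scale) >> NS_SCALE);
	return 0;
}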
@@ -61,6 +40,12 @@ unsigned long long sched_clock(void)
 	return cycles_2_ns(a);
 }
 
+static int tsc_unstable;
+
+static inline int check_tsc_unstable(void)
+{
+	return tsc_unstable;
+}
 #ifdef CONFIG_CPU_FREQ
 
 /* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
@@ -89,24 +74,6 @@ static void handle_cpufreq_delayed_get(struct work_struct *v)
 	cpufreq_delayed_issched = 0;
 }
 
-/* if we notice lost ticks, schedule a call to cpufreq_get() as it tries
- * to verify the CPU frequency the timing core thinks the CPU is running
- * at is still correct.
- */
-void cpufreq_delayed_get(void)
-{
-	static int warned;
-	if (cpufreq_init && !cpufreq_delayed_issched) {
-		cpufreq_delayed_issched = 1;
-		if (!warned) {
-			warned = 1;
-			printk(KERN_DEBUG "Losing some ticks... "
-			       "checking if CPU frequency changed.\n");
-		}
-		schedule_work(&cpufreq_delayed_get_work);
-	}
-}
-
 static unsigned int ref_freq = 0;
 static unsigned long loops_per_jiffy_ref = 0;
 
@@ -142,7 +109,7 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 
 		cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
 		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
-			vxtime.tsc_quot = (USEC_PER_MSEC << US_SCALE) / cpu_khz;
+			mark_tsc_unstable();
 	}
 
 	set_cyc2ns_scale(cpu_khz_ref);
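On CPUs whose TSC rate follows the core frequency, the notifier now rescales cpu_khz and simply declares the TSC unstable rather than re-deriving the old vxtime.tsc_quot. cpufreq_scale() is described in linux/cpufreq.h as an "old * mult / div" helper; a hedged sketch of the rescaling it performs (the helper name below is illustrative, not the kernel's):

/* Sketch only: what cpufreq_scale(cpu_khz_ref, ref_freq, freq->new) computes. */
#include <stdio.h>

static unsigned long scale_khz(unsigned long old, unsigned int div,
			       unsigned int mult)
{
	return (unsigned long)((unsigned long long)old * mult / div);
}

int main(void)
{
	/* a 2.0 GHz reference clock throttled to 1.2 GHz (values in kHz) */
	printf("cpu_khz = %lu\n", scale_khz(2000000, 2000000, 1200000));
	return 0;	/* prints cpu_khz = 1200000 */
}

mark_tsc_unstable(), defined later in this patch, demotes the TSC clocksource's rating to 0, letting the generic timekeeping code fall back to HPET or the ACPI PM timer.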
@@ -169,12 +136,6 @@ core_initcall(cpufreq_tsc);
 
 static int tsc_unstable = 0;
 
-void mark_tsc_unstable(void)
-{
-	tsc_unstable = 1;
-}
-EXPORT_SYMBOL_GPL(mark_tsc_unstable);
-
 /*
  * Make an educated guess if the TSC is trustworthy and synchronized
  * over all CPUs.
@@ -210,3 +171,49 @@ int __init notsc_setup(char *s)
 }
 
 __setup("notsc", notsc_setup);
+
+
+/* clock source code: */
+static cycle_t read_tsc(void)
+{
+	cycle_t ret = (cycle_t)get_cycles_sync();
+	return ret;
+}
+
+static struct clocksource clocksource_tsc = {
+	.name			= "tsc",
+	.rating			= 300,
+	.read			= read_tsc,
+	.mask			= CLOCKSOURCE_MASK(64),
+	.shift			= 22,
+	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
+				  CLOCK_SOURCE_MUST_VERIFY,
+};
+
+void mark_tsc_unstable(void)
+{
+	if (!tsc_unstable) {
+		tsc_unstable = 1;
+		/* Change only the rating, when not registered */
+		if (clocksource_tsc.mult)
+			clocksource_change_rating(&clocksource_tsc, 0);
+		else
+			clocksource_tsc.rating = 0;
+	}
+}
+EXPORT_SYMBOL_GPL(mark_tsc_unstable);
+
+static int __init init_tsc_clocksource(void)
+{
+	if (!notsc) {
+		clocksource_tsc.mult = clocksource_khz2mult(cpu_khz,
+							clocksource_tsc.shift);
+		if (check_tsc_unstable())
+			clocksource_tsc.rating = 0;
+
+		return clocksource_register(&clocksource_tsc);
+	}
+	return 0;
+}
+
+module_init(init_tsc_clocksource);
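This added block is the heart of the conversion: the TSC becomes an ordinary clocksource with rating 300, flagged CLOCK_SOURCE_MUST_VERIFY so the clocksource watchdog keeps an eye on it, and mark_tsc_unstable() drops the rating to 0 so any other registered source (HPET at 250, ACPI PM timer at 200) wins. The generic core converts cycle deltas to nanoseconds as (cycles * mult) >> shift, with mult computed here by clocksource_khz2mult(cpu_khz, 22). A standalone sketch of that derivation (rounding omitted; the helper name is illustrative, not the kernel's):

/* Sketch only: how a kHz rate becomes a clocksource mult for shift = 22. */
#include <stdio.h>

static unsigned int khz2mult(unsigned int khz, unsigned int shift)
{
	/* mult / 2^shift must equal ns per cycle, i.e. 1000000 / khz */
	return (unsigned int)((1000000ULL << shift) / khz);
}

int main(void)
{
	unsigned int shift = 22;			/* clocksource_tsc.shift */
	unsigned int mult = khz2mult(2000000, shift);	/* 2 GHz -> 2097152 */

	/* 1000 cycles at 2 GHz: (1000 * 2097152) >> 22 = 500 ns */
	printf("mult = %u, 1000 cycles = %llu ns\n",
	       mult, (1000ULL * mult) >> shift);
	return 0;
}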