 arch/i386/kernel/Makefile                   |   2
 arch/i386/kernel/numaq.c                    |  10
 arch/i386/kernel/setup.c                    |   1
 arch/i386/kernel/timers/timer_tsc.c         | 178
 arch/i386/kernel/tsc.c                      | 316
 drivers/acpi/processor_idle.c               |   9
 include/asm-i386/mach-default/mach_timer.h  |   4
 include/asm-i386/mach-summit/mach_mpparse.h |   3
 include/asm-i386/timex.h                    |  34
 include/asm-i386/tsc.h                      |  49
 10 files changed, 389 insertions(+), 217 deletions(-)
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index 4142d69a5336..ca70d61ea834 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -7,7 +7,7 @@ extra-y := head.o init_task.o vmlinux.lds
 obj-y := process.o semaphore.o signal.o entry.o traps.o irq.o \
 		ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_i386.o \
 		pci-dma.o i386_ksyms.o i387.o bootflag.o \
-		quirks.o i8237.o topology.o alternative.o i8253.o
+		quirks.o i8237.o topology.o alternative.o i8253.o tsc.o
 
 obj-y += cpu/
 obj-y += timers/
diff --git a/arch/i386/kernel/numaq.c b/arch/i386/kernel/numaq.c
index 5f5b075f860a..0caf14652bad 100644
--- a/arch/i386/kernel/numaq.c
+++ b/arch/i386/kernel/numaq.c
@@ -79,10 +79,12 @@ int __init get_memcfg_numaq(void)
         return 1;
 }
 
-static int __init numaq_dsc_disable(void)
+static int __init numaq_tsc_disable(void)
 {
-        printk(KERN_DEBUG "NUMAQ: disabling TSC\n");
-        tsc_disable = 1;
+        if (num_online_nodes() > 1) {
+                printk(KERN_DEBUG "NUMAQ: disabling TSC\n");
+                tsc_disable = 1;
+        }
         return 0;
 }
-core_initcall(numaq_dsc_disable);
+arch_initcall(numaq_tsc_disable);
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 6bef9273733e..4a65040cc624 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -1575,6 +1575,7 @@ void __init setup_arch(char **cmdline_p)
         conswitchp = &dummy_con;
 #endif
 #endif
+        tsc_init();
 }
 
 static __init int add_pcspkr(void)
diff --git a/arch/i386/kernel/timers/timer_tsc.c b/arch/i386/kernel/timers/timer_tsc.c
index f1187ddb0d0f..243ec0484079 100644
--- a/arch/i386/kernel/timers/timer_tsc.c
+++ b/arch/i386/kernel/timers/timer_tsc.c
@@ -32,10 +32,6 @@ static unsigned long hpet_last
 static struct timer_opts timer_tsc;
 #endif
 
-static inline void cpufreq_delayed_get(void);
-
-int tsc_disable __devinitdata = 0;
-
 static int use_tsc;
 /* Number of usecs that the last interrupt was delayed */
 static int delay_at_last_interrupt;
@@ -144,30 +140,6 @@ static unsigned long long monotonic_clock_tsc(void)
         return base + cycles_2_ns(this_offset - last_offset);
 }
 
-/*
- * Scheduler clock - returns current time in nanosec units.
- */
-unsigned long long sched_clock(void)
-{
-        unsigned long long this_offset;
-
-        /*
-         * In the NUMA case we dont use the TSC as they are not
-         * synchronized across all CPUs.
-         */
-#ifndef CONFIG_NUMA
-        if (!use_tsc)
-#endif
-                /* no locking but a rare wrong value is not a big deal */
-                return jiffies_64 * (1000000000 / HZ);
-
-        /* Read the Time Stamp Counter */
-        rdtscll(this_offset);
-
-        /* return the value in ns */
-        return cycles_2_ns(this_offset);
-}
-
 static void delay_tsc(unsigned long loops)
 {
         unsigned long bclock, now;
@@ -231,136 +203,6 @@ static void mark_offset_tsc_hpet(void)
 }
 #endif
 
-
-#ifdef CONFIG_CPU_FREQ
-#include <linux/workqueue.h>
-
-static unsigned int cpufreq_delayed_issched = 0;
-static unsigned int cpufreq_init = 0;
-static struct work_struct cpufreq_delayed_get_work;
-
-static void handle_cpufreq_delayed_get(void *v)
-{
-        unsigned int cpu;
-        for_each_online_cpu(cpu) {
-                cpufreq_get(cpu);
-        }
-        cpufreq_delayed_issched = 0;
-}
-
-/* if we notice lost ticks, schedule a call to cpufreq_get() as it tries
- * to verify the CPU frequency the timing core thinks the CPU is running
- * at is still correct.
- */
-static inline void cpufreq_delayed_get(void)
-{
-        if (cpufreq_init && !cpufreq_delayed_issched) {
-                cpufreq_delayed_issched = 1;
-                printk(KERN_DEBUG "Losing some ticks... checking if CPU frequency changed.\n");
-                schedule_work(&cpufreq_delayed_get_work);
-        }
-}
-
-/* If the CPU frequency is scaled, TSC-based delays will need a different
- * loops_per_jiffy value to function properly.
- */
-
-static unsigned int ref_freq = 0;
-static unsigned long loops_per_jiffy_ref = 0;
-
-#ifndef CONFIG_SMP
-static unsigned long fast_gettimeoffset_ref = 0;
-static unsigned int cpu_khz_ref = 0;
-#endif
-
-static int
-time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
-                      void *data)
-{
-        struct cpufreq_freqs *freq = data;
-
-        if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
-                write_seqlock_irq(&xtime_lock);
-        if (!ref_freq) {
-                if (!freq->old){
-                        ref_freq = freq->new;
-                        goto end;
-                }
-                ref_freq = freq->old;
-                loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy;
-#ifndef CONFIG_SMP
-                fast_gettimeoffset_ref = fast_gettimeoffset_quotient;
-                cpu_khz_ref = cpu_khz;
-#endif
-        }
-
-        if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
-            (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
-            (val == CPUFREQ_RESUMECHANGE)) {
-                if (!(freq->flags & CPUFREQ_CONST_LOOPS))
-                        cpu_data[freq->cpu].loops_per_jiffy = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
-#ifndef CONFIG_SMP
-                if (cpu_khz)
-                        cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
-                if (use_tsc) {
-                        if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
-                                fast_gettimeoffset_quotient = cpufreq_scale(fast_gettimeoffset_ref, freq->new, ref_freq);
-                                set_cyc2ns_scale(cpu_khz);
-                        }
-                }
-#endif
-        }
-
-end:
-        if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
-                write_sequnlock_irq(&xtime_lock);
-
-        return 0;
-}
-
-static struct notifier_block time_cpufreq_notifier_block = {
-        .notifier_call = time_cpufreq_notifier
-};
-
-
-static int __init cpufreq_tsc(void)
-{
-        int ret;
-        INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
-        ret = cpufreq_register_notifier(&time_cpufreq_notifier_block,
-                                        CPUFREQ_TRANSITION_NOTIFIER);
-        if (!ret)
-                cpufreq_init = 1;
-        return ret;
-}
-core_initcall(cpufreq_tsc);
-
-#else /* CONFIG_CPU_FREQ */
-static inline void cpufreq_delayed_get(void) { return; }
-#endif
-
-int recalibrate_cpu_khz(void)
-{
-#ifndef CONFIG_SMP
-        unsigned int cpu_khz_old = cpu_khz;
-
-        if (cpu_has_tsc) {
-                local_irq_disable();
-                init_cpu_khz();
-                local_irq_enable();
-                cpu_data[0].loops_per_jiffy =
-                        cpufreq_scale(cpu_data[0].loops_per_jiffy,
-                                      cpu_khz_old,
-                                      cpu_khz);
-                return 0;
-        } else
-                return -ENODEV;
-#else
-        return -ENODEV;
-#endif
-}
-EXPORT_SYMBOL(recalibrate_cpu_khz);
-
 static void mark_offset_tsc(void)
 {
         unsigned long lost,delay;
@@ -451,9 +293,6 @@ static void mark_offset_tsc(void)
 
                         clock_fallback();
                 }
-                /* ... but give the TSC a fair chance */
-                if (lost_count > 25)
-                        cpufreq_delayed_get();
         } else
                 lost_count = 0;
         /* update the monotonic base value */
@@ -578,23 +417,6 @@ static int tsc_resume(void)
         return 0;
 }
 
-#ifndef CONFIG_X86_TSC
-/* disable flag for tsc. Takes effect by clearing the TSC cpu flag
- * in cpu/common.c */
-static int __init tsc_setup(char *str)
-{
-        tsc_disable = 1;
-        return 1;
-}
-#else
-static int __init tsc_setup(char *str)
-{
-        printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
-                            "cannot disable TSC.\n");
-        return 1;
-}
-#endif
-__setup("notsc", tsc_setup);
 
 
 
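Aside: the cpufreq transition notifier deleted here is re-added in arch/i386/kernel/tsc.c below; the rescaling it applies to loops_per_jiffy and cpu_khz is plain proportional scaling. A minimal user-space sketch of that arithmetic follows; the cpufreq_scale() helper below is only a stand-in that mirrors the kernel helper of the same name, and the 2 GHz / 1 GHz frequencies and the loops_per_jiffy figure are made-up example values.

#include <stdio.h>

/* stand-in for the kernel's cpufreq_scale(old, div, mult): old * mult / div */
static unsigned long cpufreq_scale(unsigned long old, unsigned int div,
                                   unsigned int mult)
{
        return (unsigned long)((unsigned long long)old * mult / div);
}

int main(void)
{
        unsigned int ref_freq = 2000000;                /* kHz, hypothetical 2 GHz */
        unsigned int new_freq = 1000000;                /* kHz, throttled to 1 GHz */
        unsigned long loops_per_jiffy_ref = 9973760;    /* made-up calibration value */

        /* halving the clock should roughly halve loops_per_jiffy and cpu_khz */
        printf("loops_per_jiffy: %lu -> %lu\n", loops_per_jiffy_ref,
               cpufreq_scale(loops_per_jiffy_ref, ref_freq, new_freq));
        printf("cpu_khz:         %u -> %lu\n", ref_freq,
               cpufreq_scale(ref_freq, ref_freq, new_freq));
        return 0;
}

With the clock halved, both values come out at half their reference figures, which is what the notifier relies on when it rescales delay loops after a frequency transition.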
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
new file mode 100644
index 000000000000..3b64eaafce2b
--- /dev/null
+++ b/arch/i386/kernel/tsc.c
@@ -0,0 +1,316 @@
+/*
+ * This code largely moved from arch/i386/kernel/timer/timer_tsc.c
+ * which was originally moved from arch/i386/kernel/time.c.
+ * See comments there for proper credits.
+ */
+
+#include <linux/workqueue.h>
+#include <linux/cpufreq.h>
+#include <linux/jiffies.h>
+#include <linux/init.h>
+
+#include <asm/tsc.h>
+#include <asm/io.h>
+
+#include "mach_timer.h"
+
+/*
+ * On some systems the TSC frequency does not
+ * change with the cpu frequency. So we need
+ * an extra value to store the TSC freq
+ */
+unsigned int tsc_khz;
+
+int tsc_disable __cpuinitdata = 0;
+
+#ifdef CONFIG_X86_TSC
+static int __init tsc_setup(char *str)
+{
+        printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
+                            "cannot disable TSC.\n");
+        return 1;
+}
+#else
+/*
+ * disable flag for tsc. Takes effect by clearing the TSC cpu flag
+ * in cpu/common.c
+ */
+static int __init tsc_setup(char *str)
+{
+        tsc_disable = 1;
+
+        return 1;
+}
+#endif
+
+__setup("notsc", tsc_setup);
+
+
+/*
+ * code to mark and check if the TSC is unstable
+ * due to cpufreq or due to unsynced TSCs
+ */
+static int tsc_unstable;
+
+static inline int check_tsc_unstable(void)
+{
+        return tsc_unstable;
+}
+
+void mark_tsc_unstable(void)
+{
+        tsc_unstable = 1;
+}
+EXPORT_SYMBOL_GPL(mark_tsc_unstable);
+
+/* Accellerators for sched_clock()
+ * convert from cycles(64bits) => nanoseconds (64bits)
+ * basic equation:
+ *              ns = cycles / (freq / ns_per_sec)
+ *              ns = cycles * (ns_per_sec / freq)
+ *              ns = cycles * (10^9 / (cpu_khz * 10^3))
+ *              ns = cycles * (10^6 / cpu_khz)
+ *
+ *      Then we use scaling math (suggested by george@mvista.com) to get:
+ *              ns = cycles * (10^6 * SC / cpu_khz) / SC
+ *              ns = cycles * cyc2ns_scale / SC
+ *
+ *      And since SC is a constant power of two, we can convert the div
+ *  into a shift.
+ *
+ *  We can use khz divisor instead of mhz to keep a better percision, since
+ *  cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
+ *  (mathieu.desnoyers@polymtl.ca)
+ *
+ *                      -johnstul@us.ibm.com "math is hard, lets go shopping!"
+ */
+static unsigned long cyc2ns_scale __read_mostly;
+
+#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
+
+static inline void set_cyc2ns_scale(unsigned long cpu_khz)
+{
+        cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
+}
+
+static inline unsigned long long cycles_2_ns(unsigned long long cyc)
+{
+        return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
+}
+
+/*
+ * Scheduler clock - returns current time in nanosec units.
+ */
+unsigned long long sched_clock(void)
+{
+        unsigned long long this_offset;
+
+        /*
+         * in the NUMA case we dont use the TSC as they are not
+         * synchronized across all CPUs.
+         */
+#ifndef CONFIG_NUMA
+        if (!cpu_khz || check_tsc_unstable())
+#endif
+                /* no locking but a rare wrong value is not a big deal */
+                return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
+
+        /* read the Time Stamp Counter: */
+        rdtscll(this_offset);
+
+        /* return the value in ns */
+        return cycles_2_ns(this_offset);
+}
+
+static unsigned long calculate_cpu_khz(void)
+{
+        unsigned long long start, end;
+        unsigned long count;
+        u64 delta64;
+        int i;
+        unsigned long flags;
+
+        local_irq_save(flags);
+
+        /* run 3 times to ensure the cache is warm */
+        for (i = 0; i < 3; i++) {
+                mach_prepare_counter();
+                rdtscll(start);
+                mach_countup(&count);
+                rdtscll(end);
+        }
+        /*
+         * Error: ECTCNEVERSET
+         * The CTC wasn't reliable: we got a hit on the very first read,
+         * or the CPU was so fast/slow that the quotient wouldn't fit in
+         * 32 bits..
+         */
+        if (count <= 1)
+                goto err;
+
+        delta64 = end - start;
+
+        /* cpu freq too fast: */
+        if (delta64 > (1ULL<<32))
+                goto err;
+
+        /* cpu freq too slow: */
+        if (delta64 <= CALIBRATE_TIME_MSEC)
+                goto err;
+
+        delta64 += CALIBRATE_TIME_MSEC/2; /* round for do_div */
+        do_div(delta64,CALIBRATE_TIME_MSEC);
+
+        local_irq_restore(flags);
+        return (unsigned long)delta64;
+err:
+        local_irq_restore(flags);
+        return 0;
+}
+
+int recalibrate_cpu_khz(void)
+{
+#ifndef CONFIG_SMP
+        unsigned long cpu_khz_old = cpu_khz;
+
+        if (cpu_has_tsc) {
+                cpu_khz = calculate_cpu_khz();
+                tsc_khz = cpu_khz;
+                cpu_data[0].loops_per_jiffy =
+                        cpufreq_scale(cpu_data[0].loops_per_jiffy,
+                                      cpu_khz_old, cpu_khz);
+                return 0;
+        } else
+                return -ENODEV;
+#else
+        return -ENODEV;
+#endif
+}
+
+EXPORT_SYMBOL(recalibrate_cpu_khz);
+
+void tsc_init(void)
+{
+        if (!cpu_has_tsc || tsc_disable)
+                return;
+
+        cpu_khz = calculate_cpu_khz();
+        tsc_khz = cpu_khz;
+
+        if (!cpu_khz)
+                return;
+
+        printk("Detected %lu.%03lu MHz processor.\n",
+                (unsigned long)cpu_khz / 1000,
+                (unsigned long)cpu_khz % 1000);
+
+        set_cyc2ns_scale(cpu_khz);
+}
+
+#ifdef CONFIG_CPU_FREQ
+
+static unsigned int cpufreq_delayed_issched = 0;
+static unsigned int cpufreq_init = 0;
+static struct work_struct cpufreq_delayed_get_work;
+
+static void handle_cpufreq_delayed_get(void *v)
+{
+        unsigned int cpu;
+
+        for_each_online_cpu(cpu)
+                cpufreq_get(cpu);
+
+        cpufreq_delayed_issched = 0;
+}
+
+/*
+ * if we notice cpufreq oddness, schedule a call to cpufreq_get() as it tries
+ * to verify the CPU frequency the timing core thinks the CPU is running
+ * at is still correct.
+ */
+static inline void cpufreq_delayed_get(void)
+{
+        if (cpufreq_init && !cpufreq_delayed_issched) {
+                cpufreq_delayed_issched = 1;
+                printk(KERN_DEBUG "Checking if CPU frequency changed.\n");
+                schedule_work(&cpufreq_delayed_get_work);
+        }
+}
+
+/*
+ * if the CPU frequency is scaled, TSC-based delays will need a different
+ * loops_per_jiffy value to function properly.
+ */
+static unsigned int ref_freq = 0;
+static unsigned long loops_per_jiffy_ref = 0;
+static unsigned long cpu_khz_ref = 0;
+
+static int
+time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
+{
+        struct cpufreq_freqs *freq = data;
+
+        if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
+                write_seqlock_irq(&xtime_lock);
+
+        if (!ref_freq) {
+                if (!freq->old){
+                        ref_freq = freq->new;
+                        goto end;
+                }
+                ref_freq = freq->old;
+                loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy;
+                cpu_khz_ref = cpu_khz;
+        }
+
+        if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
+            (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
+            (val == CPUFREQ_RESUMECHANGE)) {
+                if (!(freq->flags & CPUFREQ_CONST_LOOPS))
+                        cpu_data[freq->cpu].loops_per_jiffy =
+                                cpufreq_scale(loops_per_jiffy_ref,
+                                              ref_freq, freq->new);
+
+                if (cpu_khz) {
+
+                        if (num_online_cpus() == 1)
+                                cpu_khz = cpufreq_scale(cpu_khz_ref,
+                                                ref_freq, freq->new);
+                        if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
+                                tsc_khz = cpu_khz;
+                                set_cyc2ns_scale(cpu_khz);
+                                /*
+                                 * TSC based sched_clock turns
+                                 * to junk w/ cpufreq
+                                 */
+                                mark_tsc_unstable();
+                        }
+                }
+        }
+end:
+        if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
+                write_sequnlock_irq(&xtime_lock);
+
+        return 0;
+}
+
+static struct notifier_block time_cpufreq_notifier_block = {
+        .notifier_call = time_cpufreq_notifier
+};
+
+static int __init cpufreq_tsc(void)
+{
+        int ret;
+
+        INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
+        ret = cpufreq_register_notifier(&time_cpufreq_notifier_block,
+                                        CPUFREQ_TRANSITION_NOTIFIER);
+        if (!ret)
+                cpufreq_init = 1;
+
+        return ret;
+}
+
+core_initcall(cpufreq_tsc);
+
+#endif
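Aside: a standalone sketch of the cyc2ns fixed-point scaling that tsc.c introduces above. The 2,000,000 kHz (2 GHz) cpu_khz value is hypothetical; with CYC2NS_SCALE_FACTOR = 10 it gives cyc2ns_scale = 1,024,000,000 / 2,000,000 = 512, so 2*10^9 cycles convert back to exactly 10^9 ns, i.e. one second.

/*
 * Standalone sketch of the cyc2ns scaling from tsc.c above.
 * The cpu_khz value (2,000,000 kHz == 2 GHz) is a made-up example.
 */
#include <stdio.h>

#define CYC2NS_SCALE_FACTOR 10          /* 2^10, as in tsc.c */

static unsigned long cyc2ns_scale;

static void set_cyc2ns_scale(unsigned long cpu_khz)
{
        /* 10^6 * 2^10 / cpu_khz; the numerator still fits in 32 bits */
        cyc2ns_scale = (1000000UL << CYC2NS_SCALE_FACTOR) / cpu_khz;
}

static unsigned long long cycles_2_ns(unsigned long long cyc)
{
        return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}

int main(void)
{
        set_cyc2ns_scale(2000000);      /* hypothetical 2 GHz CPU */

        /* 2e9 cycles on a 2 GHz CPU is one second, i.e. 1e9 ns */
        printf("scale=%lu, 2e9 cycles -> %llu ns\n",
               cyc2ns_scale, cycles_2_ns(2000000000ULL));
        return 0;
}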
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 3b97a5eae9e8..a5f4f2aa007a 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -369,6 +369,11 @@ static void acpi_processor_idle(void)
                 t2 = inl(acpi_fadt.xpm_tmr_blk.address);
                 /* Get end time (ticks) */
                 t2 = inl(acpi_fadt.xpm_tmr_blk.address);
+
+#ifdef CONFIG_GENERIC_TIME
+                /* TSC halts in C2, so notify users */
+                mark_tsc_unstable();
+#endif
                 /* Re-enable interrupts */
                 local_irq_enable();
                 set_thread_flag(TIF_POLLING_NRFLAG);
@@ -409,6 +414,10 @@ static void acpi_processor_idle(void)
                                           ACPI_MTX_DO_NOT_LOCK);
                 }
 
+#ifdef CONFIG_GENERIC_TIME
+                /* TSC halts in C3, so notify users */
+                mark_tsc_unstable();
+#endif
                 /* Re-enable interrupts */
                 local_irq_enable();
                 set_thread_flag(TIF_POLLING_NRFLAG);
diff --git a/include/asm-i386/mach-default/mach_timer.h b/include/asm-i386/mach-default/mach_timer.h
index 4b9703bb0288..807992fd4171 100644
--- a/include/asm-i386/mach-default/mach_timer.h
+++ b/include/asm-i386/mach-default/mach_timer.h
@@ -15,7 +15,9 @@
 #ifndef _MACH_TIMER_H
 #define _MACH_TIMER_H
 
-#define CALIBRATE_LATCH (5 * LATCH)
+#define CALIBRATE_TIME_MSEC 30 /* 30 msecs */
+#define CALIBRATE_LATCH \
+        ((CLOCK_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2)/1000)
 
 static inline void mach_prepare_counter(void)
 {
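Aside: CALIBRATE_TIME_MSEC and CALIBRATE_LATCH above feed calculate_cpu_khz() in tsc.c: the PIT is loaded with enough ticks to cover roughly 30 ms, the TSC delta over that window is read, and dividing that delta by 30 yields kHz directly (TSC cycles per millisecond). A small sketch of the arithmetic; the 60,000,000-cycle delta is a made-up value corresponding to a 2 GHz CPU, and 1193182 Hz is the usual PC PIT input clock.

/*
 * Sketch of the calibration arithmetic, with made-up numbers.
 */
#include <stdio.h>

#define CLOCK_TICK_RATE         1193182         /* usual PC PIT input clock, Hz */
#define CALIBRATE_TIME_MSEC     30
#define CALIBRATE_LATCH \
        ((CLOCK_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2) / 1000)

int main(void)
{
        unsigned long long delta64 = 60000000ULL;       /* TSC cycles over ~30 ms */

        /* round, then divide: cycles per millisecond == kHz */
        delta64 += CALIBRATE_TIME_MSEC / 2;
        delta64 /= CALIBRATE_TIME_MSEC;

        printf("PIT latch = %d ticks (~%d ms), cpu_khz = %llu\n",
               CALIBRATE_LATCH, CALIBRATE_TIME_MSEC, delta64);
        return 0;
}

For these inputs the latch comes out to 35795 PIT ticks and cpu_khz to 2,000,000, i.e. the 2 GHz frequency we started from.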
diff --git a/include/asm-i386/mach-summit/mach_mpparse.h b/include/asm-i386/mach-summit/mach_mpparse.h
index 1cce2b924a80..94268399170d 100644
--- a/include/asm-i386/mach-summit/mach_mpparse.h
+++ b/include/asm-i386/mach-summit/mach_mpparse.h
@@ -2,6 +2,7 @@
 #define __ASM_MACH_MPPARSE_H
 
 #include <mach_apic.h>
+#include <asm/tsc.h>
 
 extern int use_cyclone;
 
@@ -29,6 +30,7 @@ static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
             (!strncmp(productid, "VIGIL SMP", 9)
              || !strncmp(productid, "EXA", 3)
              || !strncmp(productid, "RUTHLESS SMP", 12))){
+                mark_tsc_unstable();
                 use_cyclone = 1; /*enable cyclone-timer*/
                 setup_summit();
                 return 1;
@@ -42,6 +44,7 @@ static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
         if (!strncmp(oem_id, "IBM", 3) &&
             (!strncmp(oem_table_id, "SERVIGIL", 8)
              || !strncmp(oem_table_id, "EXA", 3))){
+                mark_tsc_unstable();
                 use_cyclone = 1; /*enable cyclone-timer*/
                 setup_summit();
                 return 1;
diff --git a/include/asm-i386/timex.h b/include/asm-i386/timex.h
index d434984303ca..3666044409f0 100644
--- a/include/asm-i386/timex.h
+++ b/include/asm-i386/timex.h
@@ -7,6 +7,7 @@
 #define _ASMi386_TIMEX_H
 
 #include <asm/processor.h>
+#include <asm/tsc.h>
 
 #ifdef CONFIG_X86_ELAN
 # define CLOCK_TICK_RATE 1189200 /* AMD Elan has different frequency! */
@@ -15,39 +16,6 @@
 #endif
 
 
-/*
- * Standard way to access the cycle counter on i586+ CPUs.
- * Currently only used on SMP.
- *
- * If you really have a SMP machine with i486 chips or older,
- * compile for that, and this will just always return zero.
- * That's ok, it just means that the nicer scheduling heuristics
- * won't work for you.
- *
- * We only use the low 32 bits, and we'd simply better make sure
- * that we reschedule before that wraps. Scheduling at least every
- * four billion cycles just basically sounds like a good idea,
- * regardless of how fast the machine is.
- */
-typedef unsigned long long cycles_t;
-
-static inline cycles_t get_cycles (void)
-{
-        unsigned long long ret=0;
-
-#ifndef CONFIG_X86_TSC
-        if (!cpu_has_tsc)
-                return 0;
-#endif
-
-#if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC)
-        rdtscll(ret);
-#endif
-        return ret;
-}
-
-extern unsigned int cpu_khz;
-
 extern int read_current_timer(unsigned long *timer_value);
 #define ARCH_HAS_READ_CURRENT_TIMER 1
 
diff --git a/include/asm-i386/tsc.h b/include/asm-i386/tsc.h
new file mode 100644
index 000000000000..97b828ce31e0
--- /dev/null
+++ b/include/asm-i386/tsc.h
@@ -0,0 +1,49 @@
+/*
+ * linux/include/asm-i386/tsc.h
+ *
+ * i386 TSC related functions
+ */
+#ifndef _ASM_i386_TSC_H
+#define _ASM_i386_TSC_H
+
+#include <linux/config.h>
+#include <asm/processor.h>
+
+/*
+ * Standard way to access the cycle counter on i586+ CPUs.
+ * Currently only used on SMP.
+ *
+ * If you really have a SMP machine with i486 chips or older,
+ * compile for that, and this will just always return zero.
+ * That's ok, it just means that the nicer scheduling heuristics
+ * won't work for you.
+ *
+ * We only use the low 32 bits, and we'd simply better make sure
+ * that we reschedule before that wraps. Scheduling at least every
+ * four billion cycles just basically sounds like a good idea,
+ * regardless of how fast the machine is.
+ */
+typedef unsigned long long cycles_t;
+
+extern unsigned int cpu_khz;
+extern unsigned int tsc_khz;
+
+static inline cycles_t get_cycles(void)
+{
+        unsigned long long ret = 0;
+
+#ifndef CONFIG_X86_TSC
+        if (!cpu_has_tsc)
+                return 0;
+#endif
+
+#if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC)
+        rdtscll(ret);
+#endif
+        return ret;
+}
+
+extern void tsc_init(void);
+extern void mark_tsc_unstable(void);
+
+#endif
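Aside: a user-space analogue of the get_cycles()/cycles_2_ns() pairing defined above, handy for eyeballing the conversion outside the kernel. It uses the compiler's __rdtsc() intrinsic rather than the kernel's rdtscll(), and the 2,000,000 kHz tsc_khz value is a stand-in for the calibrated one.

#include <stdio.h>
#include <x86intrin.h>

int main(void)
{
        unsigned int tsc_khz = 2000000;         /* hypothetical 2 GHz TSC */
        unsigned long long start, end, ns;
        volatile unsigned long long spin = 0;
        int i;

        start = __rdtsc();
        for (i = 0; i < 1000000; i++)           /* something to measure */
                spin += i;
        end = __rdtsc();

        /* cycles -> ns: ns = cycles * 10^6 / tsc_khz, same identity as cyc2ns */
        ns = (end - start) * 1000000ULL / tsc_khz;
        printf("%llu cycles, ~%llu ns at an assumed %u kHz TSC\n",
               end - start, ns, tsc_khz);
        return 0;
}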