author		John Stultz <johnstul@us.ibm.com>		2006-06-26 03:25:10 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>		2006-06-26 12:58:21 -0400
commit		539eb11e6e904f2cd4f62908cc5e44d724879721 (patch)
tree		df18c747c5226b138862fb19fad5b1527055b9c9 /arch
parent		8d016ef1380a2a9a5ca5742ede04334199868f82 (diff)
[PATCH] Time: i386 Conversion - part 2: Rework TSC Support
As part of the i386 conversion to the generic timekeeping infrastructure, this
introduces a new tsc.c file. It takes over the TSC initialization, management
and access code from timer_tsc.c (which will be removed) that we want to
preserve.
The code also introduces the following functionality:
o tsc_khz: like cpu_khz but stores the TSC frequency on systems that do not
change TSC frequency w/ CPU frequency
o check/mark_tsc_unstable: accessor/modifier flag for TSC timekeeping
usability (a usage sketch follows this list)
o minor cleanups to calibration math.
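For reference, a minimal usage sketch (not part of the patch; the caller below
is hypothetical) of the interface introduced here. Note that
check_tsc_unstable() stays file-local to tsc.c in this version, so outside
code can only set the flag:

extern unsigned int tsc_khz;		/* TSC freq, decoupled from cpu_khz */
extern int tsc_disable;			/* set by "notsc" when !CONFIG_X86_TSC */
extern void mark_tsc_unstable(void);	/* EXPORT_SYMBOL_GPL in tsc.c */

/* hypothetical caller: any event that makes TSC timekeeping unusable */
static void example_freq_change_event(void)
{
	/* from here on, sched_clock() falls back to jiffies resolution */
	mark_tsc_unstable();
}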
This patch also includes a one line __cpuinitdata fix from Zwane Mwaikambo.
Signed-off-by: John Stultz <johnstul@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch')
-rw-r--r--	arch/i386/kernel/Makefile		|   2
-rw-r--r--	arch/i386/kernel/numaq.c		|  10
-rw-r--r--	arch/i386/kernel/setup.c		|   1
-rw-r--r--	arch/i386/kernel/timers/timer_tsc.c	| 178
-rw-r--r--	arch/i386/kernel/tsc.c			| 316
5 files changed, 324 insertions, 183 deletions
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index 4142d69a5336..ca70d61ea834 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -7,7 +7,7 @@ extra-y := head.o init_task.o vmlinux.lds
 obj-y	:= process.o semaphore.o signal.o entry.o traps.o irq.o \
 		ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_i386.o \
 		pci-dma.o i386_ksyms.o i387.o bootflag.o \
-		quirks.o i8237.o topology.o alternative.o i8253.o
+		quirks.o i8237.o topology.o alternative.o i8253.o tsc.o
 
 obj-y				+= cpu/
 obj-y				+= timers/
diff --git a/arch/i386/kernel/numaq.c b/arch/i386/kernel/numaq.c
index 5f5b075f860a..0caf14652bad 100644
--- a/arch/i386/kernel/numaq.c
+++ b/arch/i386/kernel/numaq.c
@@ -79,10 +79,12 @@ int __init get_memcfg_numaq(void)
 	return 1;
 }
 
-static int __init numaq_dsc_disable(void)
+static int __init numaq_tsc_disable(void)
 {
-	printk(KERN_DEBUG "NUMAQ: disabling TSC\n");
-	tsc_disable = 1;
+	if (num_online_nodes() > 1) {
+		printk(KERN_DEBUG "NUMAQ: disabling TSC\n");
+		tsc_disable = 1;
+	}
 	return 0;
 }
-core_initcall(numaq_dsc_disable);
+arch_initcall(numaq_tsc_disable);
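Besides the dsc -> tsc typo fix, the hook moves from core_initcall to
arch_initcall; initcall levels run in ascending order at boot, so it now fires
later, and it now disables the TSC only when more than one node is actually
online. For reference, the relevant levels, paraphrased from
include/linux/init.h of this era:

#define core_initcall(fn)	__define_initcall("1", fn)	/* old home */
#define postcore_initcall(fn)	__define_initcall("2", fn)
#define arch_initcall(fn)	__define_initcall("3", fn)	/* new home */
#define subsys_initcall(fn)	__define_initcall("4", fn)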
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 6bef9273733e..4a65040cc624 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -1575,6 +1575,7 @@ void __init setup_arch(char **cmdline_p)
 	conswitchp = &dummy_con;
 #endif
 #endif
+	tsc_init();
 }
 
 static __init int add_pcspkr(void)
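For orientation, a sketch of the boot-time sequence this one-line hunk
creates; everything below setup_arch() is added in tsc.c later in this patch:

/*
 * setup_arch()
 *   tsc_init()                  - returns early if !cpu_has_tsc or tsc_disable
 *     calculate_cpu_khz()       - CTC-timed TSC calibration, 0 on failure
 *     set_cyc2ns_scale(cpu_khz) - precompute the cycles->ns multiplier
 */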
diff --git a/arch/i386/kernel/timers/timer_tsc.c b/arch/i386/kernel/timers/timer_tsc.c
index f1187ddb0d0f..243ec0484079 100644
--- a/arch/i386/kernel/timers/timer_tsc.c
+++ b/arch/i386/kernel/timers/timer_tsc.c
@@ -32,10 +32,6 @@ static unsigned long hpet_last;
 static struct timer_opts timer_tsc;
 #endif
 
-static inline void cpufreq_delayed_get(void);
-
-int tsc_disable __devinitdata = 0;
-
 static int use_tsc;
 /* Number of usecs that the last interrupt was delayed */
 static int delay_at_last_interrupt;
@@ -144,30 +140,6 @@ static unsigned long long monotonic_clock_tsc(void)
 	return base + cycles_2_ns(this_offset - last_offset);
 }
 
-/*
- * Scheduler clock - returns current time in nanosec units.
- */
-unsigned long long sched_clock(void)
-{
-	unsigned long long this_offset;
-
-	/*
-	 * In the NUMA case we dont use the TSC as they are not
-	 * synchronized across all CPUs.
-	 */
-#ifndef CONFIG_NUMA
-	if (!use_tsc)
-#endif
-		/* no locking but a rare wrong value is not a big deal */
-		return jiffies_64 * (1000000000 / HZ);
-
-	/* Read the Time Stamp Counter */
-	rdtscll(this_offset);
-
-	/* return the value in ns */
-	return cycles_2_ns(this_offset);
-}
-
 static void delay_tsc(unsigned long loops)
 {
 	unsigned long bclock, now;
@@ -231,136 +203,6 @@ static void mark_offset_tsc_hpet(void)
 }
 #endif
 
-
-#ifdef CONFIG_CPU_FREQ
-#include <linux/workqueue.h>
-
-static unsigned int cpufreq_delayed_issched = 0;
-static unsigned int cpufreq_init = 0;
-static struct work_struct cpufreq_delayed_get_work;
-
-static void handle_cpufreq_delayed_get(void *v)
-{
-	unsigned int cpu;
-	for_each_online_cpu(cpu) {
-		cpufreq_get(cpu);
-	}
-	cpufreq_delayed_issched = 0;
-}
-
-/* if we notice lost ticks, schedule a call to cpufreq_get() as it tries
- * to verify the CPU frequency the timing core thinks the CPU is running
- * at is still correct.
- */
-static inline void cpufreq_delayed_get(void)
-{
-	if (cpufreq_init && !cpufreq_delayed_issched) {
-		cpufreq_delayed_issched = 1;
-		printk(KERN_DEBUG "Losing some ticks... checking if CPU frequency changed.\n");
-		schedule_work(&cpufreq_delayed_get_work);
-	}
-}
-
-/* If the CPU frequency is scaled, TSC-based delays will need a different
- * loops_per_jiffy value to function properly.
- */
-
-static unsigned int ref_freq = 0;
-static unsigned long loops_per_jiffy_ref = 0;
-
-#ifndef CONFIG_SMP
-static unsigned long fast_gettimeoffset_ref = 0;
-static unsigned int cpu_khz_ref = 0;
-#endif
-
-static int
-time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
-		       void *data)
-{
-	struct cpufreq_freqs *freq = data;
-
-	if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
-		write_seqlock_irq(&xtime_lock);
-	if (!ref_freq) {
-		if (!freq->old){
-			ref_freq = freq->new;
-			goto end;
-		}
-		ref_freq = freq->old;
-		loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy;
-#ifndef CONFIG_SMP
-		fast_gettimeoffset_ref = fast_gettimeoffset_quotient;
-		cpu_khz_ref = cpu_khz;
-#endif
-	}
-
-	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
-	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
-	    (val == CPUFREQ_RESUMECHANGE)) {
-		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
-			cpu_data[freq->cpu].loops_per_jiffy = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
-#ifndef CONFIG_SMP
-		if (cpu_khz)
-			cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
-		if (use_tsc) {
-			if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
-				fast_gettimeoffset_quotient = cpufreq_scale(fast_gettimeoffset_ref, freq->new, ref_freq);
-				set_cyc2ns_scale(cpu_khz);
-			}
-		}
-#endif
-	}
-
-end:
-	if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
-		write_sequnlock_irq(&xtime_lock);
-
-	return 0;
-}
-
-static struct notifier_block time_cpufreq_notifier_block = {
-	.notifier_call	= time_cpufreq_notifier
-};
-
-
-static int __init cpufreq_tsc(void)
-{
-	int ret;
-	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
-	ret = cpufreq_register_notifier(&time_cpufreq_notifier_block,
-					CPUFREQ_TRANSITION_NOTIFIER);
-	if (!ret)
-		cpufreq_init = 1;
-	return ret;
-}
-core_initcall(cpufreq_tsc);
-
-#else /* CONFIG_CPU_FREQ */
-static inline void cpufreq_delayed_get(void) { return; }
-#endif
-
-int recalibrate_cpu_khz(void)
-{
-#ifndef CONFIG_SMP
-	unsigned int cpu_khz_old = cpu_khz;
-
-	if (cpu_has_tsc) {
-		local_irq_disable();
-		init_cpu_khz();
-		local_irq_enable();
-		cpu_data[0].loops_per_jiffy =
-			cpufreq_scale(cpu_data[0].loops_per_jiffy,
-				      cpu_khz_old,
-				      cpu_khz);
-		return 0;
-	} else
-		return -ENODEV;
-#else
-	return -ENODEV;
-#endif
-}
-EXPORT_SYMBOL(recalibrate_cpu_khz);
-
 static void mark_offset_tsc(void)
 {
 	unsigned long lost,delay;
@@ -451,9 +293,6 @@ static void mark_offset_tsc(void)
 
 		clock_fallback();
 	}
-	/* ... but give the TSC a fair chance */
-	if (lost_count > 25)
-		cpufreq_delayed_get();
 	} else
 		lost_count = 0;
 	/* update the monotonic base value */
@@ -578,23 +417,6 @@ static int tsc_resume(void)
 	return 0;
 }
 
-#ifndef CONFIG_X86_TSC
-/* disable flag for tsc. Takes effect by clearing the TSC cpu flag
- * in cpu/common.c */
-static int __init tsc_setup(char *str)
-{
-	tsc_disable = 1;
-	return 1;
-}
-#else
-static int __init tsc_setup(char *str)
-{
-	printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
-		"cannot disable TSC.\n");
-	return 1;
-}
-#endif
-__setup("notsc", tsc_setup);
 
 
 
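The sched_clock() deleted above survives nearly unchanged in tsc.c, including
its lockless jiffies fallback. A stand-alone sketch (user-space demo, assuming
HZ = 1000; not kernel code) of what that fallback resolution means:

#include <stdio.h>

#define HZ 1000ULL				/* assumed tick rate */

int main(void)
{
	unsigned long long jiffies_64 = 5000;	/* example: 5000 ticks */

	/* same arithmetic as the fallback branch of sched_clock() */
	unsigned long long ns = jiffies_64 * (1000000000ULL / HZ);

	printf("%llu ticks -> %llu ns\n", jiffies_64, ns);	/* 5000000000 */
	return 0;
}

That is, the clock advances in whole-millisecond steps; the tsc.c version
additionally subtracts INITIAL_JIFFIES so the count starts near zero.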
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
new file mode 100644
index 000000000000..3b64eaafce2b
--- /dev/null
+++ b/arch/i386/kernel/tsc.c
@@ -0,0 +1,316 @@
+/*
+ * This code largely moved from arch/i386/kernel/timer/timer_tsc.c
+ * which was originally moved from arch/i386/kernel/time.c.
+ * See comments there for proper credits.
+ */
+
+#include <linux/workqueue.h>
+#include <linux/cpufreq.h>
+#include <linux/jiffies.h>
+#include <linux/init.h>
+
+#include <asm/tsc.h>
+#include <asm/io.h>
+
+#include "mach_timer.h"
+
+/*
+ * On some systems the TSC frequency does not
+ * change with the cpu frequency. So we need
+ * an extra value to store the TSC freq
+ */
+unsigned int tsc_khz;
+
+int tsc_disable __cpuinitdata = 0;
+
+#ifdef CONFIG_X86_TSC
+static int __init tsc_setup(char *str)
+{
+	printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
+				"cannot disable TSC.\n");
+	return 1;
+}
+#else
+/*
+ * disable flag for tsc. Takes effect by clearing the TSC cpu flag
+ * in cpu/common.c
+ */
+static int __init tsc_setup(char *str)
+{
+	tsc_disable = 1;
+
+	return 1;
+}
+#endif
+
+__setup("notsc", tsc_setup);
+
+
+/*
+ * code to mark and check if the TSC is unstable
+ * due to cpufreq or due to unsynced TSCs
+ */
+static int tsc_unstable;
+
+static inline int check_tsc_unstable(void)
+{
+	return tsc_unstable;
+}
+
+void mark_tsc_unstable(void)
+{
+	tsc_unstable = 1;
+}
+EXPORT_SYMBOL_GPL(mark_tsc_unstable);
+
+/* Accellerators for sched_clock()
+ * convert from cycles(64bits) => nanoseconds (64bits)
+ * basic equation:
+ *		ns = cycles / (freq / ns_per_sec)
+ *		ns = cycles * (ns_per_sec / freq)
+ *		ns = cycles * (10^9 / (cpu_khz * 10^3))
+ *		ns = cycles * (10^6 / cpu_khz)
+ *
+ *	Then we use scaling math (suggested by george@mvista.com) to get:
+ *		ns = cycles * (10^6 * SC / cpu_khz) / SC
+ *		ns = cycles * cyc2ns_scale / SC
+ *
+ *	And since SC is a constant power of two, we can convert the div
+ *  into a shift.
+ *
+ *  We can use khz divisor instead of mhz to keep a better percision, since
+ *  cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
+ *  (mathieu.desnoyers@polymtl.ca)
+ *
+ *			-johnstul@us.ibm.com "math is hard, lets go shopping!"
+ */
+static unsigned long cyc2ns_scale __read_mostly;
+
+#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
+
+static inline void set_cyc2ns_scale(unsigned long cpu_khz)
+{
+	cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
+}
+
+static inline unsigned long long cycles_2_ns(unsigned long long cyc)
+{
+	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
+}
+
+/*
+ * Scheduler clock - returns current time in nanosec units.
+ */
+unsigned long long sched_clock(void)
+{
+	unsigned long long this_offset;
+
+	/*
+	 * in the NUMA case we dont use the TSC as they are not
+	 * synchronized across all CPUs.
+	 */
+#ifndef CONFIG_NUMA
+	if (!cpu_khz || check_tsc_unstable())
+#endif
+		/* no locking but a rare wrong value is not a big deal */
+		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
+
+	/* read the Time Stamp Counter: */
+	rdtscll(this_offset);
+
+	/* return the value in ns */
+	return cycles_2_ns(this_offset);
+}
+
+static unsigned long calculate_cpu_khz(void)
+{
+	unsigned long long start, end;
+	unsigned long count;
+	u64 delta64;
+	int i;
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	/* run 3 times to ensure the cache is warm */
+	for (i = 0; i < 3; i++) {
+		mach_prepare_counter();
+		rdtscll(start);
+		mach_countup(&count);
+		rdtscll(end);
+	}
+	/*
+	 * Error: ECTCNEVERSET
+	 * The CTC wasn't reliable: we got a hit on the very first read,
+	 * or the CPU was so fast/slow that the quotient wouldn't fit in
+	 * 32 bits..
+	 */
+	if (count <= 1)
+		goto err;
+
+	delta64 = end - start;
+
+	/* cpu freq too fast: */
+	if (delta64 > (1ULL<<32))
+		goto err;
+
+	/* cpu freq too slow: */
+	if (delta64 <= CALIBRATE_TIME_MSEC)
+		goto err;
+
+	delta64 += CALIBRATE_TIME_MSEC/2; /* round for do_div */
+	do_div(delta64,CALIBRATE_TIME_MSEC);
+
+	local_irq_restore(flags);
+	return (unsigned long)delta64;
+err:
+	local_irq_restore(flags);
+	return 0;
+}
+
+int recalibrate_cpu_khz(void)
+{
+#ifndef CONFIG_SMP
+	unsigned long cpu_khz_old = cpu_khz;
+
+	if (cpu_has_tsc) {
+		cpu_khz = calculate_cpu_khz();
+		tsc_khz = cpu_khz;
+		cpu_data[0].loops_per_jiffy =
+			cpufreq_scale(cpu_data[0].loops_per_jiffy,
+					cpu_khz_old, cpu_khz);
+		return 0;
+	} else
+		return -ENODEV;
+#else
+	return -ENODEV;
+#endif
+}
+
+EXPORT_SYMBOL(recalibrate_cpu_khz);
+
+void tsc_init(void)
+{
+	if (!cpu_has_tsc || tsc_disable)
+		return;
+
+	cpu_khz = calculate_cpu_khz();
+	tsc_khz = cpu_khz;
+
+	if (!cpu_khz)
+		return;
+
+	printk("Detected %lu.%03lu MHz processor.\n",
+		(unsigned long)cpu_khz / 1000,
+		(unsigned long)cpu_khz % 1000);
+
+	set_cyc2ns_scale(cpu_khz);
+}
+
+#ifdef CONFIG_CPU_FREQ
+
+static unsigned int cpufreq_delayed_issched = 0;
+static unsigned int cpufreq_init = 0;
+static struct work_struct cpufreq_delayed_get_work;
+
+static void handle_cpufreq_delayed_get(void *v)
+{
+	unsigned int cpu;
+
+	for_each_online_cpu(cpu)
+		cpufreq_get(cpu);
+
+	cpufreq_delayed_issched = 0;
+}
+
+/*
+ * if we notice cpufreq oddness, schedule a call to cpufreq_get() as it tries
+ * to verify the CPU frequency the timing core thinks the CPU is running
+ * at is still correct.
+ */
+static inline void cpufreq_delayed_get(void)
+{
+	if (cpufreq_init && !cpufreq_delayed_issched) {
+		cpufreq_delayed_issched = 1;
+		printk(KERN_DEBUG "Checking if CPU frequency changed.\n");
+		schedule_work(&cpufreq_delayed_get_work);
+	}
+}
+
+/*
+ * if the CPU frequency is scaled, TSC-based delays will need a different
+ * loops_per_jiffy value to function properly.
+ */
+static unsigned int ref_freq = 0;
+static unsigned long loops_per_jiffy_ref = 0;
+static unsigned long cpu_khz_ref = 0;
+
+static int
+time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
+{
+	struct cpufreq_freqs *freq = data;
+
+	if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
+		write_seqlock_irq(&xtime_lock);
+
+	if (!ref_freq) {
+		if (!freq->old){
+			ref_freq = freq->new;
+			goto end;
+		}
+		ref_freq = freq->old;
+		loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy;
+		cpu_khz_ref = cpu_khz;
+	}
+
+	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
+	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
+	    (val == CPUFREQ_RESUMECHANGE)) {
+		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
+			cpu_data[freq->cpu].loops_per_jiffy =
+				cpufreq_scale(loops_per_jiffy_ref,
+						ref_freq, freq->new);
+
+		if (cpu_khz) {
+
+			if (num_online_cpus() == 1)
+				cpu_khz = cpufreq_scale(cpu_khz_ref,
+						ref_freq, freq->new);
+			if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
+				tsc_khz = cpu_khz;
+				set_cyc2ns_scale(cpu_khz);
+				/*
+				 * TSC based sched_clock turns
+				 * to junk w/ cpufreq
+				 */
+				mark_tsc_unstable();
+			}
+		}
+	}
+end:
+	if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
+		write_sequnlock_irq(&xtime_lock);
+
+	return 0;
+}
+
+static struct notifier_block time_cpufreq_notifier_block = {
+	.notifier_call	= time_cpufreq_notifier
+};
+
+static int __init cpufreq_tsc(void)
+{
+	int ret;
+
+	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
+	ret = cpufreq_register_notifier(&time_cpufreq_notifier_block,
+					CPUFREQ_TRANSITION_NOTIFIER);
+	if (!ret)
+		cpufreq_init = 1;
+
+	return ret;
+}
+
+core_initcall(cpufreq_tsc);
+
+#endif
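A stand-alone check (user-space demo with an assumed 2 GHz part; not kernel
code) of the cyc2ns scaling math documented above:

#include <stdio.h>

#define CYC2NS_SCALE_FACTOR 10	/* 2^10, as in tsc.c */

int main(void)
{
	unsigned long cpu_khz = 2000000;		/* assumed: 2.0 GHz */
	unsigned long cyc2ns_scale =
		(1000000UL << CYC2NS_SCALE_FACTOR) / cpu_khz;
	unsigned long long cycles = 2000000000ULL;	/* one second at 2 GHz */
	unsigned long long ns = (cycles * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;

	/* (10^6 << 10) / (2 * 10^6) = 512; (2e9 * 512) >> 10 = 1e9 ns */
	printf("cyc2ns_scale = %lu, %llu cycles -> %llu ns\n",
	       cyc2ns_scale, cycles, ns);
	return 0;
}

Calibration runs the same relationship the other way: calculate_cpu_khz()
counts TSC cycles across a fixed CALIBRATE_TIME_MSEC window, and cycles per
millisecond is numerically kHz, which is why the rounded do_div() result can
be stored as cpu_khz unchanged.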