author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/cpufreq/cpufreq.c
tags      Linux-2.6.12-rc2, v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'drivers/cpufreq/cpufreq.c')
 -rw-r--r--  drivers/cpufreq/cpufreq.c | 1428
 1 file changed, 1428 insertions(+), 0 deletions(-)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
new file mode 100644
index 000000000000..b30001f31610
--- /dev/null
+++ b/drivers/cpufreq/cpufreq.c
@@ -0,0 +1,1428 @@
1 | /* | ||
2 | * linux/drivers/cpufreq/cpufreq.c | ||
3 | * | ||
4 | * Copyright (C) 2001 Russell King | ||
5 | * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | #include <linux/config.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/notifier.h> | ||
18 | #include <linux/cpufreq.h> | ||
19 | #include <linux/delay.h> | ||
20 | #include <linux/interrupt.h> | ||
21 | #include <linux/spinlock.h> | ||
22 | #include <linux/device.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include <linux/cpu.h> | ||
25 | #include <linux/completion.h> | ||
26 | |||
27 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "cpufreq-core", msg) | ||
28 | |||
29 | /** | ||
30 | * The "cpufreq driver" - the arch- or hardware-dependend low | ||
31 | * level driver of CPUFreq support, and its spinlock. This lock | ||
32 | * also protects the cpufreq_cpu_data array. | ||
33 | */ | ||
34 | static struct cpufreq_driver *cpufreq_driver; | ||
35 | static struct cpufreq_policy *cpufreq_cpu_data[NR_CPUS]; | ||
36 | static DEFINE_SPINLOCK(cpufreq_driver_lock); | ||
37 | |||
38 | |||
39 | /* we keep a copy of every ->add'ed CPU's struct sys_device here; | ||
40 | * as it is only accessed in ->add and ->remove, no lock or reference | ||
41 | * count is necessary. | ||
42 | */ | ||
43 | static struct sys_device *cpu_sys_devices[NR_CPUS]; | ||
44 | |||
45 | |||
46 | /* internal prototypes */ | ||
47 | static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event); | ||
48 | static void handle_update(void *data); | ||
49 | static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci); | ||
50 | |||
51 | /** | ||
52 | * Two notifier lists: the "policy" list is involved in the | ||
53 | * validation process for a new CPU frequency policy; the | ||
54 | * "transition" list for kernel code that needs to handle | ||
55 | * changes to devices when the CPU clock speed changes. | ||
56 | * The rwsem below protects both lists. | ||
57 | */ | ||
58 | static struct notifier_block *cpufreq_policy_notifier_list; | ||
59 | static struct notifier_block *cpufreq_transition_notifier_list; | ||
60 | static DECLARE_RWSEM (cpufreq_notifier_rwsem); | ||
61 | |||
62 | |||
63 | static LIST_HEAD(cpufreq_governor_list); | ||
64 | static DECLARE_MUTEX (cpufreq_governor_sem); | ||
65 | |||
66 | struct cpufreq_policy * cpufreq_cpu_get(unsigned int cpu) | ||
67 | { | ||
68 | struct cpufreq_policy *data; | ||
69 | unsigned long flags; | ||
70 | |||
71 | if (cpu >= NR_CPUS) | ||
72 | goto err_out; | ||
73 | |||
74 | /* get the cpufreq driver */ | ||
75 | spin_lock_irqsave(&cpufreq_driver_lock, flags); | ||
76 | |||
77 | if (!cpufreq_driver) | ||
78 | goto err_out_unlock; | ||
79 | |||
80 | if (!try_module_get(cpufreq_driver->owner)) | ||
81 | goto err_out_unlock; | ||
82 | |||
83 | |||
84 | /* get the CPU */ | ||
85 | data = cpufreq_cpu_data[cpu]; | ||
86 | |||
87 | if (!data) | ||
88 | goto err_out_put_module; | ||
89 | |||
90 | if (!kobject_get(&data->kobj)) | ||
91 | goto err_out_put_module; | ||
92 | |||
93 | |||
94 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | ||
95 | |||
96 | return data; | ||
97 | |||
98 | err_out_put_module: | ||
99 | module_put(cpufreq_driver->owner); | ||
100 | err_out_unlock: | ||
101 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | ||
102 | err_out: | ||
103 | return NULL; | ||
104 | } | ||
105 | EXPORT_SYMBOL_GPL(cpufreq_cpu_get); | ||
106 | |||
107 | void cpufreq_cpu_put(struct cpufreq_policy *data) | ||
108 | { | ||
109 | kobject_put(&data->kobj); | ||
110 | module_put(cpufreq_driver->owner); | ||
111 | } | ||
112 | EXPORT_SYMBOL_GPL(cpufreq_cpu_put); | ||
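/*
 * Illustrative usage sketch, not part of this file: code that looks up a
 * policy via cpufreq_cpu_get() must balance it with cpufreq_cpu_put(), since
 * the pair holds both a kobject reference and a module reference.  The
 * helper name below is made up.
 */
static unsigned int example_read_cur_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int cur;

	if (!policy)
		return 0;		/* no driver or no policy for this CPU */
	cur = policy->cur;
	cpufreq_cpu_put(policy);	/* drop kobject + module references */
	return cur;
}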
113 | |||
114 | |||
115 | /********************************************************************* | ||
116 | * UNIFIED DEBUG HELPERS * | ||
117 | *********************************************************************/ | ||
118 | #ifdef CONFIG_CPU_FREQ_DEBUG | ||
119 | |||
120 | /* what part(s) of the CPUfreq subsystem are debugged? */ | ||
121 | static unsigned int debug; | ||
122 | |||
123 | /* is the debug output ratelimit'ed using printk_ratelimit? User can | ||
124 | * set or modify this value. | ||
125 | */ | ||
126 | static unsigned int debug_ratelimit = 1; | ||
127 | |||
128 | /* is the printk_ratelimit'ing enabled? It's enabled after a successful | ||
129 | * loading of a cpufreq driver, temporarily disabled when a new policy | ||
130 | * is set, and disabled upon cpufreq driver removal | ||
131 | */ | ||
132 | static unsigned int disable_ratelimit = 1; | ||
133 | static DEFINE_SPINLOCK(disable_ratelimit_lock); | ||
134 | |||
135 | static inline void cpufreq_debug_enable_ratelimit(void) | ||
136 | { | ||
137 | unsigned long flags; | ||
138 | |||
139 | spin_lock_irqsave(&disable_ratelimit_lock, flags); | ||
140 | if (disable_ratelimit) | ||
141 | disable_ratelimit--; | ||
142 | spin_unlock_irqrestore(&disable_ratelimit_lock, flags); | ||
143 | } | ||
144 | |||
145 | static inline void cpufreq_debug_disable_ratelimit(void) | ||
146 | { | ||
147 | unsigned long flags; | ||
148 | |||
149 | spin_lock_irqsave(&disable_ratelimit_lock, flags); | ||
150 | disable_ratelimit++; | ||
151 | spin_unlock_irqrestore(&disable_ratelimit_lock, flags); | ||
152 | } | ||
153 | |||
154 | void cpufreq_debug_printk(unsigned int type, const char *prefix, const char *fmt, ...) | ||
155 | { | ||
156 | char s[256]; | ||
157 | va_list args; | ||
158 | unsigned int len; | ||
159 | unsigned long flags; | ||
160 | |||
161 | WARN_ON(!prefix); | ||
162 | if (type & debug) { | ||
163 | spin_lock_irqsave(&disable_ratelimit_lock, flags); | ||
164 | if (!disable_ratelimit && debug_ratelimit && !printk_ratelimit()) { | ||
165 | spin_unlock_irqrestore(&disable_ratelimit_lock, flags); | ||
166 | return; | ||
167 | } | ||
168 | spin_unlock_irqrestore(&disable_ratelimit_lock, flags); | ||
169 | |||
170 | len = snprintf(s, 256, KERN_DEBUG "%s: ", prefix); | ||
171 | |||
172 | va_start(args, fmt); | ||
173 | len += vsnprintf(&s[len], (256 - len), fmt, args); | ||
174 | va_end(args); | ||
175 | |||
176 | printk(s); | ||
177 | |||
178 | WARN_ON(len < 5); | ||
179 | } | ||
180 | } | ||
181 | EXPORT_SYMBOL(cpufreq_debug_printk); | ||
182 | |||
183 | |||
184 | module_param(debug, uint, 0644); | ||
185 | MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core, 2 to debug drivers, and 4 to debug governors."); | ||
186 | |||
187 | module_param(debug_ratelimit, uint, 0644); | ||
188 | MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging: set to 0 to disable ratelimiting."); | ||
189 | |||
190 | #else /* !CONFIG_CPU_FREQ_DEBUG */ | ||
191 | |||
192 | static inline void cpufreq_debug_enable_ratelimit(void) { return; } | ||
193 | static inline void cpufreq_debug_disable_ratelimit(void) { return; } | ||
194 | |||
195 | #endif /* CONFIG_CPU_FREQ_DEBUG */ | ||
196 | |||
197 | |||
198 | /********************************************************************* | ||
199 | * EXTERNALLY AFFECTING FREQUENCY CHANGES * | ||
200 | *********************************************************************/ | ||
201 | |||
202 | /** | ||
203 | * adjust_jiffies - adjust the system "loops_per_jiffy" | ||
204 | * | ||
205 | * This function alters the system "loops_per_jiffy" for the clock | ||
206 | * speed change. Note that loops_per_jiffy cannot be updated on SMP | ||
207 | * systems as each CPU might be scaled differently. So, use the arch | ||
208 | * per-CPU loops_per_jiffy value wherever possible. | ||
209 | */ | ||
210 | #ifndef CONFIG_SMP | ||
211 | static unsigned long l_p_j_ref; | ||
212 | static unsigned int l_p_j_ref_freq; | ||
213 | |||
214 | static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) | ||
215 | { | ||
216 | if (ci->flags & CPUFREQ_CONST_LOOPS) | ||
217 | return; | ||
218 | |||
219 | if (!l_p_j_ref_freq) { | ||
220 | l_p_j_ref = loops_per_jiffy; | ||
221 | l_p_j_ref_freq = ci->old; | ||
222 | dprintk("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq); | ||
223 | } | ||
224 | if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) || | ||
225 | (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) || | ||
226 | (val == CPUFREQ_RESUMECHANGE)) { | ||
227 | loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq, ci->new); | ||
228 | dprintk("scaling loops_per_jiffy to %lu for frequency %u kHz\n", loops_per_jiffy, ci->new); | ||
229 | } | ||
230 | } | ||
231 | #else | ||
232 | static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) { return; } | ||
233 | #endif | ||
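/*
 * Worked example (illustrative, not part of this file): assuming the usual
 * proportional scaling old * new_freq / ref_freq performed by cpufreq_scale(),
 * a UP system calibrated with loops_per_jiffy = 4,000,000 at 800,000 kHz that
 * transitions to 1,600,000 kHz gets loops_per_jiffy rescaled to roughly
 * 4,000,000 * 1,600,000 / 800,000 = 8,000,000, keeping udelay() accurate.
 */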
234 | |||
235 | |||
236 | /** | ||
237 | * cpufreq_notify_transition - call notifier chain and adjust_jiffies on frequency transition | ||
238 | * | ||
239 | * This function calls the transition notifiers and the "adjust_jiffies" function. It is called | ||
240 | * twice on all CPU frequency changes that have external effects. | ||
241 | */ | ||
242 | void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state) | ||
243 | { | ||
244 | BUG_ON(irqs_disabled()); | ||
245 | |||
246 | freqs->flags = cpufreq_driver->flags; | ||
247 | dprintk("notification %u of frequency transition to %u kHz\n", state, freqs->new); | ||
248 | |||
249 | down_read(&cpufreq_notifier_rwsem); | ||
250 | switch (state) { | ||
251 | case CPUFREQ_PRECHANGE: | ||
252 | /* detect if the driver reported a value as "old frequency" which | ||
253 | * is not equal to what the cpufreq core thinks is "old frequency". | ||
254 | */ | ||
255 | if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { | ||
256 | if ((likely(cpufreq_cpu_data[freqs->cpu])) && | ||
257 | (likely(cpufreq_cpu_data[freqs->cpu]->cpu == freqs->cpu)) && | ||
258 | (likely(cpufreq_cpu_data[freqs->cpu]->cur)) && | ||
259 | (unlikely(freqs->old != cpufreq_cpu_data[freqs->cpu]->cur))) | ||
260 | { | ||
261 | printk(KERN_WARNING "Warning: CPU frequency is %u, " | ||
262 | "cpufreq assumed %u kHz.\n", freqs->old, cpufreq_cpu_data[freqs->cpu]->cur); | ||
263 | freqs->old = cpufreq_cpu_data[freqs->cpu]->cur; | ||
264 | } | ||
265 | } | ||
266 | notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_PRECHANGE, freqs); | ||
267 | adjust_jiffies(CPUFREQ_PRECHANGE, freqs); | ||
268 | break; | ||
269 | case CPUFREQ_POSTCHANGE: | ||
270 | adjust_jiffies(CPUFREQ_POSTCHANGE, freqs); | ||
271 | notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_POSTCHANGE, freqs); | ||
272 | if ((likely(cpufreq_cpu_data[freqs->cpu])) && | ||
273 | (likely(cpufreq_cpu_data[freqs->cpu]->cpu == freqs->cpu))) | ||
274 | cpufreq_cpu_data[freqs->cpu]->cur = freqs->new; | ||
275 | break; | ||
276 | } | ||
277 | up_read(&cpufreq_notifier_rwsem); | ||
278 | } | ||
279 | EXPORT_SYMBOL_GPL(cpufreq_notify_transition); | ||
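/*
 * Usage sketch (illustrative, not part of this file): a scaling driver's
 * ->target() callback typically brackets the hardware reprogramming with the
 * two notifications issued through cpufreq_notify_transition().
 */
static int example_driver_target(struct cpufreq_policy *policy,
				 unsigned int target_freq,
				 unsigned int relation)
{
	struct cpufreq_freqs freqs;

	freqs.cpu = policy->cpu;
	freqs.old = policy->cur;
	freqs.new = target_freq;

	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	/* ... program the hardware to target_freq here ... */
	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

	return 0;
}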
280 | |||
281 | |||
282 | |||
283 | /********************************************************************* | ||
284 | * SYSFS INTERFACE * | ||
285 | *********************************************************************/ | ||
286 | |||
287 | /** | ||
288 | * cpufreq_parse_governor - parse a governor string | ||
289 | */ | ||
290 | static int cpufreq_parse_governor (char *str_governor, unsigned int *policy, | ||
291 | struct cpufreq_governor **governor) | ||
292 | { | ||
293 | if (!cpufreq_driver) | ||
294 | return -EINVAL; | ||
295 | if (cpufreq_driver->setpolicy) { | ||
296 | if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) { | ||
297 | *policy = CPUFREQ_POLICY_PERFORMANCE; | ||
298 | return 0; | ||
299 | } else if (!strnicmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) { | ||
300 | *policy = CPUFREQ_POLICY_POWERSAVE; | ||
301 | return 0; | ||
302 | } | ||
303 | return -EINVAL; | ||
304 | } else { | ||
305 | struct cpufreq_governor *t; | ||
306 | down(&cpufreq_governor_sem); | ||
307 | if (!cpufreq_driver || !cpufreq_driver->target) | ||
308 | goto out; | ||
309 | list_for_each_entry(t, &cpufreq_governor_list, governor_list) { | ||
310 | if (!strnicmp(str_governor,t->name,CPUFREQ_NAME_LEN)) { | ||
311 | *governor = t; | ||
312 | up(&cpufreq_governor_sem); | ||
313 | return 0; | ||
314 | } | ||
315 | } | ||
316 | out: | ||
317 | up(&cpufreq_governor_sem); | ||
318 | } | ||
319 | return -EINVAL; | ||
320 | } | ||
321 | EXPORT_SYMBOL_GPL(cpufreq_parse_governor); | ||
322 | |||
323 | |||
324 | /* drivers/base/cpu.c */ | ||
325 | extern struct sysdev_class cpu_sysdev_class; | ||
326 | |||
327 | |||
328 | /** | ||
329 | * cpufreq_per_cpu_attr_read() / show_##file_name() - print out cpufreq information | ||
330 | * | ||
331 | * Write out information from cpufreq_driver->policy[cpu]; object must be | ||
332 | * "unsigned int". | ||
333 | */ | ||
334 | |||
335 | #define show_one(file_name, object) \ | ||
336 | static ssize_t show_##file_name \ | ||
337 | (struct cpufreq_policy * policy, char *buf) \ | ||
338 | { \ | ||
339 | return sprintf (buf, "%u\n", policy->object); \ | ||
340 | } | ||
341 | |||
342 | show_one(cpuinfo_min_freq, cpuinfo.min_freq); | ||
343 | show_one(cpuinfo_max_freq, cpuinfo.max_freq); | ||
344 | show_one(scaling_min_freq, min); | ||
345 | show_one(scaling_max_freq, max); | ||
346 | show_one(scaling_cur_freq, cur); | ||
347 | |||
348 | /** | ||
349 | * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access | ||
350 | */ | ||
351 | #define store_one(file_name, object) \ | ||
352 | static ssize_t store_##file_name \ | ||
353 | (struct cpufreq_policy * policy, const char *buf, size_t count) \ | ||
354 | { \ | ||
355 | unsigned int ret = -EINVAL; \ | ||
356 | struct cpufreq_policy new_policy; \ | ||
357 | \ | ||
358 | ret = cpufreq_get_policy(&new_policy, policy->cpu); \ | ||
359 | if (ret) \ | ||
360 | return -EINVAL; \ | ||
361 | \ | ||
362 | ret = sscanf (buf, "%u", &new_policy.object); \ | ||
363 | if (ret != 1) \ | ||
364 | return -EINVAL; \ | ||
365 | \ | ||
366 | ret = cpufreq_set_policy(&new_policy); \ | ||
367 | \ | ||
368 | return ret ? ret : count; \ | ||
369 | } | ||
370 | |||
371 | store_one(scaling_min_freq,min); | ||
372 | store_one(scaling_max_freq,max); | ||
373 | |||
374 | /** | ||
375 | * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware | ||
376 | */ | ||
377 | static ssize_t show_cpuinfo_cur_freq (struct cpufreq_policy * policy, char *buf) | ||
378 | { | ||
379 | unsigned int cur_freq = cpufreq_get(policy->cpu); | ||
380 | if (!cur_freq) | ||
381 | return sprintf(buf, "<unknown>"); | ||
382 | return sprintf(buf, "%u\n", cur_freq); | ||
383 | } | ||
384 | |||
385 | |||
386 | /** | ||
387 | * show_scaling_governor - show the current policy for the specified CPU | ||
388 | */ | ||
389 | static ssize_t show_scaling_governor (struct cpufreq_policy * policy, char *buf) | ||
390 | { | ||
391 | if(policy->policy == CPUFREQ_POLICY_POWERSAVE) | ||
392 | return sprintf(buf, "powersave\n"); | ||
393 | else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) | ||
394 | return sprintf(buf, "performance\n"); | ||
395 | else if (policy->governor) | ||
396 | return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", policy->governor->name); | ||
397 | return -EINVAL; | ||
398 | } | ||
399 | |||
400 | |||
401 | /** | ||
402 | * store_scaling_governor - store policy for the specified CPU | ||
403 | */ | ||
404 | static ssize_t store_scaling_governor (struct cpufreq_policy * policy, | ||
405 | const char *buf, size_t count) | ||
406 | { | ||
407 | unsigned int ret = -EINVAL; | ||
408 | char str_governor[16]; | ||
409 | struct cpufreq_policy new_policy; | ||
410 | |||
411 | ret = cpufreq_get_policy(&new_policy, policy->cpu); | ||
412 | if (ret) | ||
413 | return ret; | ||
414 | |||
415 | ret = sscanf (buf, "%15s", str_governor); | ||
416 | if (ret != 1) | ||
417 | return -EINVAL; | ||
418 | |||
419 | if (cpufreq_parse_governor(str_governor, &new_policy.policy, &new_policy.governor)) | ||
420 | return -EINVAL; | ||
421 | |||
422 | ret = cpufreq_set_policy(&new_policy); | ||
423 | |||
424 | return ret ? ret : count; | ||
425 | } | ||
426 | |||
427 | /** | ||
428 | * show_scaling_driver - show the cpufreq driver currently loaded | ||
429 | */ | ||
430 | static ssize_t show_scaling_driver (struct cpufreq_policy * policy, char *buf) | ||
431 | { | ||
432 | return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name); | ||
433 | } | ||
434 | |||
435 | /** | ||
436 | * show_scaling_available_governors - show the available CPUfreq governors | ||
437 | */ | ||
438 | static ssize_t show_scaling_available_governors (struct cpufreq_policy * policy, | ||
439 | char *buf) | ||
440 | { | ||
441 | ssize_t i = 0; | ||
442 | struct cpufreq_governor *t; | ||
443 | |||
444 | if (!cpufreq_driver->target) { | ||
445 | i += sprintf(buf, "performance powersave"); | ||
446 | goto out; | ||
447 | } | ||
448 | |||
449 | list_for_each_entry(t, &cpufreq_governor_list, governor_list) { | ||
450 | if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) - (CPUFREQ_NAME_LEN + 2))) | ||
451 | goto out; | ||
452 | i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name); | ||
453 | } | ||
454 | out: | ||
455 | i += sprintf(&buf[i], "\n"); | ||
456 | return i; | ||
457 | } | ||
458 | /** | ||
459 | * show_affected_cpus - show the CPUs affected by each transition | ||
460 | */ | ||
461 | static ssize_t show_affected_cpus (struct cpufreq_policy * policy, char *buf) | ||
462 | { | ||
463 | ssize_t i = 0; | ||
464 | unsigned int cpu; | ||
465 | |||
466 | for_each_cpu_mask(cpu, policy->cpus) { | ||
467 | if (i) | ||
468 | i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " "); | ||
469 | i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu); | ||
470 | if (i >= (PAGE_SIZE - 5)) | ||
471 | break; | ||
472 | } | ||
473 | i += sprintf(&buf[i], "\n"); | ||
474 | return i; | ||
475 | } | ||
476 | |||
477 | |||
478 | #define define_one_ro(_name) \ | ||
479 | static struct freq_attr _name = \ | ||
480 | __ATTR(_name, 0444, show_##_name, NULL) | ||
481 | |||
482 | #define define_one_ro0400(_name) \ | ||
483 | static struct freq_attr _name = \ | ||
484 | __ATTR(_name, 0400, show_##_name, NULL) | ||
485 | |||
486 | #define define_one_rw(_name) \ | ||
487 | static struct freq_attr _name = \ | ||
488 | __ATTR(_name, 0644, show_##_name, store_##_name) | ||
489 | |||
490 | define_one_ro0400(cpuinfo_cur_freq); | ||
491 | define_one_ro(cpuinfo_min_freq); | ||
492 | define_one_ro(cpuinfo_max_freq); | ||
493 | define_one_ro(scaling_available_governors); | ||
494 | define_one_ro(scaling_driver); | ||
495 | define_one_ro(scaling_cur_freq); | ||
496 | define_one_ro(affected_cpus); | ||
497 | define_one_rw(scaling_min_freq); | ||
498 | define_one_rw(scaling_max_freq); | ||
499 | define_one_rw(scaling_governor); | ||
500 | |||
501 | static struct attribute * default_attrs[] = { | ||
502 | &cpuinfo_min_freq.attr, | ||
503 | &cpuinfo_max_freq.attr, | ||
504 | &scaling_min_freq.attr, | ||
505 | &scaling_max_freq.attr, | ||
506 | &affected_cpus.attr, | ||
507 | &scaling_governor.attr, | ||
508 | &scaling_driver.attr, | ||
509 | &scaling_available_governors.attr, | ||
510 | NULL | ||
511 | }; | ||
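/*
 * Sketch (illustrative, not part of this file): a scaling driver can export
 * additional per-policy sysfs files by pointing its cpufreq_driver->attr at a
 * NULL-terminated array of struct freq_attr; cpufreq_add_dev() below creates
 * one file per entry.  "example_boost" is a made-up attribute name.
 */
static ssize_t show_example_boost(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}
define_one_ro(example_boost);

static struct freq_attr *example_driver_attrs[] = {
	&example_boost,
	NULL,
};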
512 | |||
513 | #define to_policy(k) container_of(k,struct cpufreq_policy,kobj) | ||
514 | #define to_attr(a) container_of(a,struct freq_attr,attr) | ||
515 | |||
516 | static ssize_t show(struct kobject * kobj, struct attribute * attr ,char * buf) | ||
517 | { | ||
518 | struct cpufreq_policy * policy = to_policy(kobj); | ||
519 | struct freq_attr * fattr = to_attr(attr); | ||
520 | ssize_t ret; | ||
521 | policy = cpufreq_cpu_get(policy->cpu); | ||
522 | if (!policy) | ||
523 | return -EINVAL; | ||
524 | ret = fattr->show ? fattr->show(policy,buf) : 0; | ||
525 | cpufreq_cpu_put(policy); | ||
526 | return ret; | ||
527 | } | ||
528 | |||
529 | static ssize_t store(struct kobject * kobj, struct attribute * attr, | ||
530 | const char * buf, size_t count) | ||
531 | { | ||
532 | struct cpufreq_policy * policy = to_policy(kobj); | ||
533 | struct freq_attr * fattr = to_attr(attr); | ||
534 | ssize_t ret; | ||
535 | policy = cpufreq_cpu_get(policy->cpu); | ||
536 | if (!policy) | ||
537 | return -EINVAL; | ||
538 | ret = fattr->store ? fattr->store(policy,buf,count) : 0; | ||
539 | cpufreq_cpu_put(policy); | ||
540 | return ret; | ||
541 | } | ||
542 | |||
543 | static void cpufreq_sysfs_release(struct kobject * kobj) | ||
544 | { | ||
545 | struct cpufreq_policy * policy = to_policy(kobj); | ||
546 | dprintk("last reference is dropped\n"); | ||
547 | complete(&policy->kobj_unregister); | ||
548 | } | ||
549 | |||
550 | static struct sysfs_ops sysfs_ops = { | ||
551 | .show = show, | ||
552 | .store = store, | ||
553 | }; | ||
554 | |||
555 | static struct kobj_type ktype_cpufreq = { | ||
556 | .sysfs_ops = &sysfs_ops, | ||
557 | .default_attrs = default_attrs, | ||
558 | .release = cpufreq_sysfs_release, | ||
559 | }; | ||
560 | |||
561 | |||
562 | /** | ||
563 | * cpufreq_add_dev - add a CPU device | ||
564 | * | ||
565 | * Adds the cpufreq interface for a CPU device. | ||
566 | */ | ||
567 | static int cpufreq_add_dev (struct sys_device * sys_dev) | ||
568 | { | ||
569 | unsigned int cpu = sys_dev->id; | ||
570 | int ret = 0; | ||
571 | struct cpufreq_policy new_policy; | ||
572 | struct cpufreq_policy *policy; | ||
573 | struct freq_attr **drv_attr; | ||
574 | unsigned long flags; | ||
575 | unsigned int j; | ||
576 | |||
577 | cpufreq_debug_disable_ratelimit(); | ||
578 | dprintk("adding CPU %u\n", cpu); | ||
579 | |||
580 | #ifdef CONFIG_SMP | ||
581 | /* check whether a different CPU already registered this | ||
582 | * CPU because it is in the same boat. */ | ||
583 | policy = cpufreq_cpu_get(cpu); | ||
584 | if (unlikely(policy)) { | ||
585 | cpu_sys_devices[cpu] = sys_dev; | ||
586 | dprintk("CPU already managed, adding link\n"); | ||
587 | sysfs_create_link(&sys_dev->kobj, &policy->kobj, "cpufreq"); | ||
588 | cpufreq_debug_enable_ratelimit(); | ||
589 | return 0; | ||
590 | } | ||
591 | #endif | ||
592 | |||
593 | if (!try_module_get(cpufreq_driver->owner)) { | ||
594 | ret = -EINVAL; | ||
595 | goto module_out; | ||
596 | } | ||
597 | |||
598 | policy = kmalloc(sizeof(struct cpufreq_policy), GFP_KERNEL); | ||
599 | if (!policy) { | ||
600 | ret = -ENOMEM; | ||
601 | goto nomem_out; | ||
602 | } | ||
603 | memset(policy, 0, sizeof(struct cpufreq_policy)); | ||
604 | |||
605 | policy->cpu = cpu; | ||
606 | policy->cpus = cpumask_of_cpu(cpu); | ||
607 | |||
608 | init_MUTEX_LOCKED(&policy->lock); | ||
609 | init_completion(&policy->kobj_unregister); | ||
610 | INIT_WORK(&policy->update, handle_update, (void *)(long)cpu); | ||
611 | |||
612 | /* call driver. From then on the cpufreq driver must be able | ||
613 | * to accept all calls to ->verify and ->setpolicy for this CPU | ||
614 | */ | ||
615 | ret = cpufreq_driver->init(policy); | ||
616 | if (ret) { | ||
617 | dprintk("initialization failed\n"); | ||
618 | goto err_out; | ||
619 | } | ||
620 | |||
621 | memcpy(&new_policy, policy, sizeof(struct cpufreq_policy)); | ||
622 | |||
623 | /* prepare interface data */ | ||
624 | policy->kobj.parent = &sys_dev->kobj; | ||
625 | policy->kobj.ktype = &ktype_cpufreq; | ||
626 | strlcpy(policy->kobj.name, "cpufreq", KOBJ_NAME_LEN); | ||
627 | |||
628 | ret = kobject_register(&policy->kobj); | ||
629 | if (ret) | ||
630 | goto err_out; | ||
631 | |||
632 | /* set up files for this cpu device */ | ||
633 | drv_attr = cpufreq_driver->attr; | ||
634 | while ((drv_attr) && (*drv_attr)) { | ||
635 | sysfs_create_file(&policy->kobj, &((*drv_attr)->attr)); | ||
636 | drv_attr++; | ||
637 | } | ||
638 | if (cpufreq_driver->get) | ||
639 | sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr); | ||
640 | if (cpufreq_driver->target) | ||
641 | sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); | ||
642 | |||
643 | spin_lock_irqsave(&cpufreq_driver_lock, flags); | ||
644 | for_each_cpu_mask(j, policy->cpus) | ||
645 | cpufreq_cpu_data[j] = policy; | ||
646 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | ||
647 | policy->governor = NULL; /* to ensure that the starting sequence is | ||
648 | * run in cpufreq_set_policy */ | ||
649 | up(&policy->lock); | ||
650 | |||
651 | /* set default policy */ | ||
652 | |||
653 | ret = cpufreq_set_policy(&new_policy); | ||
654 | if (ret) { | ||
655 | dprintk("setting policy failed\n"); | ||
656 | goto err_out_unregister; | ||
657 | } | ||
658 | |||
659 | module_put(cpufreq_driver->owner); | ||
660 | cpu_sys_devices[cpu] = sys_dev; | ||
661 | dprintk("initialization complete\n"); | ||
662 | cpufreq_debug_enable_ratelimit(); | ||
663 | |||
664 | return 0; | ||
665 | |||
666 | |||
667 | err_out_unregister: | ||
668 | spin_lock_irqsave(&cpufreq_driver_lock, flags); | ||
669 | for_each_cpu_mask(j, policy->cpus) | ||
670 | cpufreq_cpu_data[j] = NULL; | ||
671 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | ||
672 | |||
673 | kobject_unregister(&policy->kobj); | ||
674 | wait_for_completion(&policy->kobj_unregister); | ||
675 | |||
676 | err_out: | ||
677 | kfree(policy); | ||
678 | |||
679 | nomem_out: | ||
680 | module_put(cpufreq_driver->owner); | ||
681 | module_out: | ||
682 | cpufreq_debug_enable_ratelimit(); | ||
683 | return ret; | ||
684 | } | ||
685 | |||
686 | |||
687 | /** | ||
688 | * cpufreq_remove_dev - remove a CPU device | ||
689 | * | ||
690 | * Removes the cpufreq interface for a CPU device. | ||
691 | */ | ||
692 | static int cpufreq_remove_dev (struct sys_device * sys_dev) | ||
693 | { | ||
694 | unsigned int cpu = sys_dev->id; | ||
695 | unsigned long flags; | ||
696 | struct cpufreq_policy *data; | ||
697 | #ifdef CONFIG_SMP | ||
698 | unsigned int j; | ||
699 | #endif | ||
700 | |||
701 | cpufreq_debug_disable_ratelimit(); | ||
702 | dprintk("unregistering CPU %u\n", cpu); | ||
703 | |||
704 | spin_lock_irqsave(&cpufreq_driver_lock, flags); | ||
705 | data = cpufreq_cpu_data[cpu]; | ||
706 | |||
707 | if (!data) { | ||
708 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | ||
709 | cpu_sys_devices[cpu] = NULL; | ||
710 | cpufreq_debug_enable_ratelimit(); | ||
711 | return -EINVAL; | ||
712 | } | ||
713 | cpufreq_cpu_data[cpu] = NULL; | ||
714 | |||
715 | |||
716 | #ifdef CONFIG_SMP | ||
717 | /* if this isn't the CPU which is the parent of the kobj, we | ||
718 | * only need to unlink, put and exit | ||
719 | */ | ||
720 | if (unlikely(cpu != data->cpu)) { | ||
721 | dprintk("removing link\n"); | ||
722 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | ||
723 | sysfs_remove_link(&sys_dev->kobj, "cpufreq"); | ||
724 | cpu_sys_devices[cpu] = NULL; | ||
725 | cpufreq_cpu_put(data); | ||
726 | cpufreq_debug_enable_ratelimit(); | ||
727 | return 0; | ||
728 | } | ||
729 | #endif | ||
730 | |||
731 | cpu_sys_devices[cpu] = NULL; | ||
732 | |||
733 | if (!kobject_get(&data->kobj)) { | ||
734 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | ||
735 | cpufreq_debug_enable_ratelimit(); | ||
736 | return -EFAULT; | ||
737 | } | ||
738 | |||
739 | #ifdef CONFIG_SMP | ||
740 | /* if we have other CPUs still registered, we need to unlink them, | ||
741 | * or else wait_for_completion below will lock up. Clean the | ||
742 | * cpufreq_cpu_data[] while holding the lock, and remove the sysfs | ||
743 | * links afterwards. | ||
744 | */ | ||
745 | if (unlikely(cpus_weight(data->cpus) > 1)) { | ||
746 | for_each_cpu_mask(j, data->cpus) { | ||
747 | if (j == cpu) | ||
748 | continue; | ||
749 | cpufreq_cpu_data[j] = NULL; | ||
750 | } | ||
751 | } | ||
752 | |||
753 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | ||
754 | |||
755 | if (unlikely(cpus_weight(data->cpus) > 1)) { | ||
756 | for_each_cpu_mask(j, data->cpus) { | ||
757 | if (j == cpu) | ||
758 | continue; | ||
759 | dprintk("removing link for cpu %u\n", j); | ||
760 | sysfs_remove_link(&cpu_sys_devices[j]->kobj, "cpufreq"); | ||
761 | cpufreq_cpu_put(data); | ||
762 | } | ||
763 | } | ||
764 | #else | ||
765 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | ||
766 | #endif | ||
767 | |||
768 | down(&data->lock); | ||
769 | if (cpufreq_driver->target) | ||
770 | __cpufreq_governor(data, CPUFREQ_GOV_STOP); | ||
771 | cpufreq_driver->target = NULL; | ||
772 | up(&data->lock); | ||
773 | |||
774 | kobject_unregister(&data->kobj); | ||
775 | |||
776 | kobject_put(&data->kobj); | ||
777 | |||
778 | /* we need to make sure that the underlying kobj is actually | ||
779 | * not referenced anymore by anybody before we proceed with | ||
780 | * unloading. | ||
781 | */ | ||
782 | dprintk("waiting for dropping of refcount\n"); | ||
783 | wait_for_completion(&data->kobj_unregister); | ||
784 | dprintk("wait complete\n"); | ||
785 | |||
786 | if (cpufreq_driver->exit) | ||
787 | cpufreq_driver->exit(data); | ||
788 | |||
789 | kfree(data); | ||
790 | |||
791 | cpufreq_debug_enable_ratelimit(); | ||
792 | |||
793 | return 0; | ||
794 | } | ||
795 | |||
796 | |||
797 | static void handle_update(void *data) | ||
798 | { | ||
799 | unsigned int cpu = (unsigned int)(long)data; | ||
800 | dprintk("handle_update for cpu %u called\n", cpu); | ||
801 | cpufreq_update_policy(cpu); | ||
802 | } | ||
803 | |||
804 | /** | ||
805 | * cpufreq_out_of_sync - If actual and saved CPU frequency differ, we're in deep trouble. | ||
806 | * @cpu: cpu number | ||
807 | * @old_freq: CPU frequency the kernel thinks the CPU runs at | ||
808 | * @new_freq: CPU frequency the CPU actually runs at | ||
809 | * | ||
810 | * We adjust to the current frequency first, and clean up later by either | ||
811 | * calling cpufreq_update_policy() or scheduling handle_update(). | ||
812 | */ | ||
813 | static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, unsigned int new_freq) | ||
814 | { | ||
815 | struct cpufreq_freqs freqs; | ||
816 | |||
817 | printk(KERN_WARNING "Warning: CPU frequency out of sync: cpufreq and timing " | ||
818 | "core thinks of %u, is %u kHz.\n", old_freq, new_freq); | ||
819 | |||
820 | freqs.cpu = cpu; | ||
821 | freqs.old = old_freq; | ||
822 | freqs.new = new_freq; | ||
823 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | ||
824 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | ||
825 | } | ||
826 | |||
827 | |||
828 | /** | ||
829 | * cpufreq_get - get the current CPU frequency (in kHz) | ||
830 | * @cpu: CPU number | ||
831 | * | ||
832 | * Get the current frequency of the given CPU | ||
833 | */ | ||
834 | unsigned int cpufreq_get(unsigned int cpu) | ||
835 | { | ||
836 | struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); | ||
837 | unsigned int ret = 0; | ||
838 | |||
839 | if (!policy) | ||
840 | return 0; | ||
841 | |||
842 | if (!cpufreq_driver->get) | ||
843 | goto out; | ||
844 | |||
845 | down(&policy->lock); | ||
846 | |||
847 | ret = cpufreq_driver->get(cpu); | ||
848 | |||
849 | if (ret && policy->cur && !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) | ||
850 | { | ||
851 | /* verify no discrepancy between actual and saved value exists */ | ||
852 | if (unlikely(ret != policy->cur)) { | ||
853 | cpufreq_out_of_sync(cpu, policy->cur, ret); | ||
854 | schedule_work(&policy->update); | ||
855 | } | ||
856 | } | ||
857 | |||
858 | up(&policy->lock); | ||
859 | |||
860 | out: | ||
861 | cpufreq_cpu_put(policy); | ||
862 | |||
863 | return (ret); | ||
864 | } | ||
865 | EXPORT_SYMBOL(cpufreq_get); | ||
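/*
 * Usage sketch (illustrative, not part of this file): cpufreq_get() is the
 * exported way for other kernel code to read the current frequency in kHz;
 * a return value of 0 means it is unknown (no policy, or no ->get callback).
 */
static void example_report_cpu0_freq(void)
{
	unsigned int khz = cpufreq_get(0);

	if (khz)
		printk(KERN_INFO "cpu0 is running at %u kHz\n", khz);
}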
866 | |||
867 | |||
868 | /** | ||
869 | * cpufreq_resume - restore proper CPU frequency handling after resume | ||
870 | * | ||
871 | * 1.) resume CPUfreq hardware support (cpufreq_driver->resume()) | ||
872 | * 2.) if ->target and !CPUFREQ_CONST_LOOPS: verify we're in sync | ||
873 | * 3.) schedule a call to cpufreq_update_policy() ASAP once interrupts are restored. | ||
874 | */ | ||
875 | static int cpufreq_resume(struct sys_device * sysdev) | ||
876 | { | ||
877 | int cpu = sysdev->id; | ||
878 | unsigned int ret = 0; | ||
879 | struct cpufreq_policy *cpu_policy; | ||
880 | |||
881 | dprintk("resuming cpu %u\n", cpu); | ||
882 | |||
883 | if (!cpu_online(cpu)) | ||
884 | return 0; | ||
885 | |||
886 | /* we may be lax here as interrupts are off. Nonetheless | ||
887 | * we need to grab the correct cpu policy, so as to check | ||
888 | * whether we really run on this CPU. | ||
889 | */ | ||
890 | |||
891 | cpu_policy = cpufreq_cpu_get(cpu); | ||
892 | if (!cpu_policy) | ||
893 | return -EINVAL; | ||
894 | |||
895 | /* only handle each CPU group once */ | ||
896 | if (unlikely(cpu_policy->cpu != cpu)) { | ||
897 | cpufreq_cpu_put(cpu_policy); | ||
898 | return 0; | ||
899 | } | ||
900 | |||
901 | if (cpufreq_driver->resume) { | ||
902 | ret = cpufreq_driver->resume(cpu_policy); | ||
903 | if (ret) { | ||
904 | printk(KERN_ERR "cpufreq: resume failed in ->resume " | ||
905 | "step on CPU %u\n", cpu_policy->cpu); | ||
906 | cpufreq_cpu_put(cpu_policy); | ||
907 | return ret; | ||
908 | } | ||
909 | } | ||
910 | |||
911 | if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { | ||
912 | unsigned int cur_freq = 0; | ||
913 | |||
914 | if (cpufreq_driver->get) | ||
915 | cur_freq = cpufreq_driver->get(cpu_policy->cpu); | ||
916 | |||
917 | if (!cur_freq || !cpu_policy->cur) { | ||
918 | printk(KERN_ERR "cpufreq: resume failed to assert current frequency is what timing core thinks it is.\n"); | ||
919 | goto out; | ||
920 | } | ||
921 | |||
922 | if (unlikely(cur_freq != cpu_policy->cur)) { | ||
923 | struct cpufreq_freqs freqs; | ||
924 | |||
925 | printk(KERN_WARNING "Warning: CPU frequency is %u, " | ||
926 | "cpufreq assumed %u kHz.\n", cur_freq, cpu_policy->cur); | ||
927 | |||
928 | freqs.cpu = cpu; | ||
929 | freqs.old = cpu_policy->cur; | ||
930 | freqs.new = cur_freq; | ||
931 | |||
932 | notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_RESUMECHANGE, &freqs); | ||
933 | adjust_jiffies(CPUFREQ_RESUMECHANGE, &freqs); | ||
934 | |||
935 | cpu_policy->cur = cur_freq; | ||
936 | } | ||
937 | } | ||
938 | |||
939 | out: | ||
940 | schedule_work(&cpu_policy->update); | ||
941 | cpufreq_cpu_put(cpu_policy); | ||
942 | return ret; | ||
943 | } | ||
944 | |||
945 | static struct sysdev_driver cpufreq_sysdev_driver = { | ||
946 | .add = cpufreq_add_dev, | ||
947 | .remove = cpufreq_remove_dev, | ||
948 | .resume = cpufreq_resume, | ||
949 | }; | ||
950 | |||
951 | |||
952 | /********************************************************************* | ||
953 | * NOTIFIER LISTS INTERFACE * | ||
954 | *********************************************************************/ | ||
955 | |||
956 | /** | ||
957 | * cpufreq_register_notifier - register a driver with cpufreq | ||
958 | * @nb: notifier function to register | ||
959 | * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER | ||
960 | * | ||
961 | * Add a driver to one of two lists: either a list of drivers that | ||
962 | * are notified about clock rate changes (once before and once after | ||
963 | * the transition), or a list of drivers that are notified about | ||
964 | * changes in cpufreq policy. | ||
965 | * | ||
966 | * This function may sleep, and has the same return conditions as | ||
967 | * notifier_chain_register. | ||
968 | */ | ||
969 | int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list) | ||
970 | { | ||
971 | int ret; | ||
972 | |||
973 | down_write(&cpufreq_notifier_rwsem); | ||
974 | switch (list) { | ||
975 | case CPUFREQ_TRANSITION_NOTIFIER: | ||
976 | ret = notifier_chain_register(&cpufreq_transition_notifier_list, nb); | ||
977 | break; | ||
978 | case CPUFREQ_POLICY_NOTIFIER: | ||
979 | ret = notifier_chain_register(&cpufreq_policy_notifier_list, nb); | ||
980 | break; | ||
981 | default: | ||
982 | ret = -EINVAL; | ||
983 | } | ||
984 | up_write(&cpufreq_notifier_rwsem); | ||
985 | |||
986 | return ret; | ||
987 | } | ||
988 | EXPORT_SYMBOL(cpufreq_register_notifier); | ||
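/*
 * Usage sketch (illustrative, not part of this file): a transition notifier
 * is called once with CPUFREQ_PRECHANGE and once with CPUFREQ_POSTCHANGE per
 * frequency change, with a struct cpufreq_freqs * as the data argument.
 */
static int example_transition_handler(struct notifier_block *nb,
				      unsigned long val, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (val == CPUFREQ_POSTCHANGE)
		printk(KERN_DEBUG "cpu %u switched from %u to %u kHz\n",
		       freqs->cpu, freqs->old, freqs->new);
	return NOTIFY_OK;
}

static struct notifier_block example_transition_nb = {
	.notifier_call = example_transition_handler,
};

/* from the client's init code:
 *	cpufreq_register_notifier(&example_transition_nb,
 *				  CPUFREQ_TRANSITION_NOTIFIER);
 */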
989 | |||
990 | |||
991 | /** | ||
992 | * cpufreq_unregister_notifier - unregister a driver with cpufreq | ||
993 | * @nb: notifier block to be unregistered | ||
994 | * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER | ||
995 | * | ||
996 | * Remove a driver from the CPU frequency notifier list. | ||
997 | * | ||
998 | * This function may sleep, and has the same return conditions as | ||
999 | * notifier_chain_unregister. | ||
1000 | */ | ||
1001 | int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list) | ||
1002 | { | ||
1003 | int ret; | ||
1004 | |||
1005 | down_write(&cpufreq_notifier_rwsem); | ||
1006 | switch (list) { | ||
1007 | case CPUFREQ_TRANSITION_NOTIFIER: | ||
1008 | ret = notifier_chain_unregister(&cpufreq_transition_notifier_list, nb); | ||
1009 | break; | ||
1010 | case CPUFREQ_POLICY_NOTIFIER: | ||
1011 | ret = notifier_chain_unregister(&cpufreq_policy_notifier_list, nb); | ||
1012 | break; | ||
1013 | default: | ||
1014 | ret = -EINVAL; | ||
1015 | } | ||
1016 | up_write(&cpufreq_notifier_rwsem); | ||
1017 | |||
1018 | return ret; | ||
1019 | } | ||
1020 | EXPORT_SYMBOL(cpufreq_unregister_notifier); | ||
1021 | |||
1022 | |||
1023 | /********************************************************************* | ||
1024 | * GOVERNORS * | ||
1025 | *********************************************************************/ | ||
1026 | |||
1027 | |||
1028 | int __cpufreq_driver_target(struct cpufreq_policy *policy, | ||
1029 | unsigned int target_freq, | ||
1030 | unsigned int relation) | ||
1031 | { | ||
1032 | int retval = -EINVAL; | ||
1033 | lock_cpu_hotplug(); | ||
1034 | dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu, | ||
1035 | target_freq, relation); | ||
1036 | if (cpu_online(policy->cpu) && cpufreq_driver->target) | ||
1037 | retval = cpufreq_driver->target(policy, target_freq, relation); | ||
1038 | unlock_cpu_hotplug(); | ||
1039 | return retval; | ||
1040 | } | ||
1041 | EXPORT_SYMBOL_GPL(__cpufreq_driver_target); | ||
1042 | |||
1043 | |||
1044 | int cpufreq_driver_target(struct cpufreq_policy *policy, | ||
1045 | unsigned int target_freq, | ||
1046 | unsigned int relation) | ||
1047 | { | ||
1048 | unsigned int ret; | ||
1049 | |||
1050 | policy = cpufreq_cpu_get(policy->cpu); | ||
1051 | if (!policy) | ||
1052 | return -EINVAL; | ||
1053 | |||
1054 | down(&policy->lock); | ||
1055 | |||
1056 | ret = __cpufreq_driver_target(policy, target_freq, relation); | ||
1057 | |||
1058 | up(&policy->lock); | ||
1059 | |||
1060 | cpufreq_cpu_put(policy); | ||
1061 | |||
1062 | return ret; | ||
1063 | } | ||
1064 | EXPORT_SYMBOL_GPL(cpufreq_driver_target); | ||
1065 | |||
1066 | |||
1067 | static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event) | ||
1068 | { | ||
1069 | int ret = -EINVAL; | ||
1070 | |||
1071 | if (!try_module_get(policy->governor->owner)) | ||
1072 | return -EINVAL; | ||
1073 | |||
1074 | dprintk("__cpufreq_governor for CPU %u, event %u\n", policy->cpu, event); | ||
1075 | ret = policy->governor->governor(policy, event); | ||
1076 | |||
1077 | /* we keep one module reference alive for each CPU governed by this governor */ | ||
1078 | if ((event != CPUFREQ_GOV_START) || ret) | ||
1079 | module_put(policy->governor->owner); | ||
1080 | if ((event == CPUFREQ_GOV_STOP) && !ret) | ||
1081 | module_put(policy->governor->owner); | ||
1082 | |||
1083 | return ret; | ||
1084 | } | ||
1085 | |||
1086 | |||
1087 | int cpufreq_governor(unsigned int cpu, unsigned int event) | ||
1088 | { | ||
1089 | int ret = 0; | ||
1090 | struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); | ||
1091 | |||
1092 | if (!policy) | ||
1093 | return -EINVAL; | ||
1094 | |||
1095 | down(&policy->lock); | ||
1096 | ret = __cpufreq_governor(policy, event); | ||
1097 | up(&policy->lock); | ||
1098 | |||
1099 | cpufreq_cpu_put(policy); | ||
1100 | |||
1101 | return ret; | ||
1102 | } | ||
1103 | EXPORT_SYMBOL_GPL(cpufreq_governor); | ||
1104 | |||
1105 | |||
1106 | int cpufreq_register_governor(struct cpufreq_governor *governor) | ||
1107 | { | ||
1108 | struct cpufreq_governor *t; | ||
1109 | |||
1110 | if (!governor) | ||
1111 | return -EINVAL; | ||
1112 | |||
1113 | down(&cpufreq_governor_sem); | ||
1114 | |||
1115 | list_for_each_entry(t, &cpufreq_governor_list, governor_list) { | ||
1116 | if (!strnicmp(governor->name,t->name,CPUFREQ_NAME_LEN)) { | ||
1117 | up(&cpufreq_governor_sem); | ||
1118 | return -EBUSY; | ||
1119 | } | ||
1120 | } | ||
1121 | list_add(&governor->governor_list, &cpufreq_governor_list); | ||
1122 | |||
1123 | up(&cpufreq_governor_sem); | ||
1124 | |||
1125 | return 0; | ||
1126 | } | ||
1127 | EXPORT_SYMBOL_GPL(cpufreq_register_governor); | ||
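/*
 * Sketch (illustrative, not part of this file): a minimal governor only has
 * to handle the three events used by the core.  This made-up "example"
 * governor pins each policy to its maximum frequency on start and whenever
 * the limits change (CPUFREQ_RELATION_H comes from <linux/cpufreq.h>).
 */
static int example_governor(struct cpufreq_policy *policy, unsigned int event)
{
	switch (event) {
	case CPUFREQ_GOV_START:
	case CPUFREQ_GOV_LIMITS:
		__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
		break;
	case CPUFREQ_GOV_STOP:
		break;			/* nothing to tear down */
	}
	return 0;
}

static struct cpufreq_governor cpufreq_gov_example = {
	.name		= "example",
	.governor	= example_governor,
	.owner		= THIS_MODULE,
};

/* registered from module init with
 *	cpufreq_register_governor(&cpufreq_gov_example);
 */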
1128 | |||
1129 | |||
1130 | void cpufreq_unregister_governor(struct cpufreq_governor *governor) | ||
1131 | { | ||
1132 | if (!governor) | ||
1133 | return; | ||
1134 | |||
1135 | down(&cpufreq_governor_sem); | ||
1136 | list_del(&governor->governor_list); | ||
1137 | up(&cpufreq_governor_sem); | ||
1138 | return; | ||
1139 | } | ||
1140 | EXPORT_SYMBOL_GPL(cpufreq_unregister_governor); | ||
1141 | |||
1142 | |||
1143 | |||
1144 | /********************************************************************* | ||
1145 | * POLICY INTERFACE * | ||
1146 | *********************************************************************/ | ||
1147 | |||
1148 | /** | ||
1149 | * cpufreq_get_policy - get the current cpufreq_policy | ||
1150 | * @policy: struct cpufreq_policy into which the current cpufreq_policy is written | ||
1151 | * | ||
1152 | * Reads the current cpufreq policy. | ||
1153 | */ | ||
1154 | int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu) | ||
1155 | { | ||
1156 | struct cpufreq_policy *cpu_policy; | ||
1157 | if (!policy) | ||
1158 | return -EINVAL; | ||
1159 | |||
1160 | cpu_policy = cpufreq_cpu_get(cpu); | ||
1161 | if (!cpu_policy) | ||
1162 | return -EINVAL; | ||
1163 | |||
1164 | down(&cpu_policy->lock); | ||
1165 | memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy)); | ||
1166 | up(&cpu_policy->lock); | ||
1167 | |||
1168 | cpufreq_cpu_put(cpu_policy); | ||
1169 | |||
1170 | return 0; | ||
1171 | } | ||
1172 | EXPORT_SYMBOL(cpufreq_get_policy); | ||
1173 | |||
1174 | |||
1175 | static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy) | ||
1176 | { | ||
1177 | int ret = 0; | ||
1178 | |||
1179 | cpufreq_debug_disable_ratelimit(); | ||
1180 | dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu, | ||
1181 | policy->min, policy->max); | ||
1182 | |||
1183 | memcpy(&policy->cpuinfo, | ||
1184 | &data->cpuinfo, | ||
1185 | sizeof(struct cpufreq_cpuinfo)); | ||
1186 | |||
1187 | /* verify the cpu speed can be set within this limit */ | ||
1188 | ret = cpufreq_driver->verify(policy); | ||
1189 | if (ret) | ||
1190 | goto error_out; | ||
1191 | |||
1192 | down_read(&cpufreq_notifier_rwsem); | ||
1193 | |||
1194 | /* adjust if necessary - all reasons */ | ||
1195 | notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_ADJUST, | ||
1196 | policy); | ||
1197 | |||
1198 | /* adjust if necessary - hardware incompatibility*/ | ||
1199 | notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_INCOMPATIBLE, | ||
1200 | policy); | ||
1201 | |||
1202 | /* verify the cpu speed can be set within this limit, | ||
1203 | which might be different from the first one | ||
1204 | ret = cpufreq_driver->verify(policy); | ||
1205 | if (ret) { | ||
1206 | up_read(&cpufreq_notifier_rwsem); | ||
1207 | goto error_out; | ||
1208 | } | ||
1209 | |||
1210 | /* notification of the new policy */ | ||
1211 | notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_NOTIFY, | ||
1212 | policy); | ||
1213 | |||
1214 | up_read(&cpufreq_notifier_rwsem); | ||
1215 | |||
1216 | data->min = policy->min; | ||
1217 | data->max = policy->max; | ||
1218 | |||
1219 | dprintk("new min and max freqs are %u - %u kHz\n", data->min, data->max); | ||
1220 | |||
1221 | if (cpufreq_driver->setpolicy) { | ||
1222 | data->policy = policy->policy; | ||
1223 | dprintk("setting range\n"); | ||
1224 | ret = cpufreq_driver->setpolicy(policy); | ||
1225 | } else { | ||
1226 | if (policy->governor != data->governor) { | ||
1227 | /* save old, working values */ | ||
1228 | struct cpufreq_governor *old_gov = data->governor; | ||
1229 | |||
1230 | dprintk("governor switch\n"); | ||
1231 | |||
1232 | /* end old governor */ | ||
1233 | if (data->governor) | ||
1234 | __cpufreq_governor(data, CPUFREQ_GOV_STOP); | ||
1235 | |||
1236 | /* start new governor */ | ||
1237 | data->governor = policy->governor; | ||
1238 | if (__cpufreq_governor(data, CPUFREQ_GOV_START)) { | ||
1239 | /* new governor failed, so re-start old one */ | ||
1240 | dprintk("starting governor %s failed\n", data->governor->name); | ||
1241 | if (old_gov) { | ||
1242 | data->governor = old_gov; | ||
1243 | __cpufreq_governor(data, CPUFREQ_GOV_START); | ||
1244 | } | ||
1245 | ret = -EINVAL; | ||
1246 | goto error_out; | ||
1247 | } | ||
1248 | /* might be a policy change, too, so fall through */ | ||
1249 | } | ||
1250 | dprintk("governor: change or update limits\n"); | ||
1251 | __cpufreq_governor(data, CPUFREQ_GOV_LIMITS); | ||
1252 | } | ||
1253 | |||
1254 | error_out: | ||
1255 | cpufreq_debug_enable_ratelimit(); | ||
1256 | return ret; | ||
1257 | } | ||
1258 | |||
1259 | /** | ||
1260 | * cpufreq_set_policy - set a new CPUFreq policy | ||
1261 | * @policy: policy to be set. | ||
1262 | * | ||
1263 | * Sets a new CPU frequency and voltage scaling policy. | ||
1264 | */ | ||
1265 | int cpufreq_set_policy(struct cpufreq_policy *policy) | ||
1266 | { | ||
1267 | int ret = 0; | ||
1268 | struct cpufreq_policy *data; | ||
1269 | |||
1270 | if (!policy) | ||
1271 | return -EINVAL; | ||
1272 | |||
1273 | data = cpufreq_cpu_get(policy->cpu); | ||
1274 | if (!data) | ||
1275 | return -EINVAL; | ||
1276 | |||
1277 | /* lock this CPU */ | ||
1278 | down(&data->lock); | ||
1279 | |||
1280 | ret = __cpufreq_set_policy(data, policy); | ||
1281 | data->user_policy.min = data->min; | ||
1282 | data->user_policy.max = data->max; | ||
1283 | data->user_policy.policy = data->policy; | ||
1284 | data->user_policy.governor = data->governor; | ||
1285 | |||
1286 | up(&data->lock); | ||
1287 | cpufreq_cpu_put(data); | ||
1288 | |||
1289 | return ret; | ||
1290 | } | ||
1291 | EXPORT_SYMBOL(cpufreq_set_policy); | ||
1292 | |||
1293 | |||
1294 | /** | ||
1295 | * cpufreq_update_policy - re-evaluate an existing cpufreq policy | ||
1296 | * @cpu: CPU which shall be re-evaluated | ||
1297 | * | ||
1298 | * Useful for policy notifiers which have different needs | ||
1299 | * at different times. | ||
1300 | */ | ||
1301 | int cpufreq_update_policy(unsigned int cpu) | ||
1302 | { | ||
1303 | struct cpufreq_policy *data = cpufreq_cpu_get(cpu); | ||
1304 | struct cpufreq_policy policy; | ||
1305 | int ret = 0; | ||
1306 | |||
1307 | if (!data) | ||
1308 | return -ENODEV; | ||
1309 | |||
1310 | down(&data->lock); | ||
1311 | |||
1312 | dprintk("updating policy for CPU %u\n", cpu); | ||
1313 | memcpy(&policy, | ||
1314 | data, | ||
1315 | sizeof(struct cpufreq_policy)); | ||
1316 | policy.min = data->user_policy.min; | ||
1317 | policy.max = data->user_policy.max; | ||
1318 | policy.policy = data->user_policy.policy; | ||
1319 | policy.governor = data->user_policy.governor; | ||
1320 | |||
1321 | ret = __cpufreq_set_policy(data, &policy); | ||
1322 | |||
1323 | up(&data->lock); | ||
1324 | |||
1325 | cpufreq_cpu_put(data); | ||
1326 | return ret; | ||
1327 | } | ||
1328 | EXPORT_SYMBOL(cpufreq_update_policy); | ||
1329 | |||
1330 | |||
1331 | /********************************************************************* | ||
1332 | * REGISTER / UNREGISTER CPUFREQ DRIVER * | ||
1333 | *********************************************************************/ | ||
1334 | |||
1335 | /** | ||
1336 | * cpufreq_register_driver - register a CPU Frequency driver | ||
1337 | * @driver_data: A struct cpufreq_driver containing the values | ||
1338 | * submitted by the CPU Frequency driver. | ||
1339 | * | ||
1340 | * Registers a CPU Frequency driver to this core code. This code | ||
1341 | * returns zero on success, -EBUSY when another driver got here first | ||
1342 | * (and isn't unregistered in the meantime). | ||
1343 | * | ||
1344 | */ | ||
1345 | int cpufreq_register_driver(struct cpufreq_driver *driver_data) | ||
1346 | { | ||
1347 | unsigned long flags; | ||
1348 | int ret; | ||
1349 | |||
1350 | if (!driver_data || !driver_data->verify || !driver_data->init || | ||
1351 | ((!driver_data->setpolicy) && (!driver_data->target))) | ||
1352 | return -EINVAL; | ||
1353 | |||
1354 | dprintk("trying to register driver %s\n", driver_data->name); | ||
1355 | |||
1356 | if (driver_data->setpolicy) | ||
1357 | driver_data->flags |= CPUFREQ_CONST_LOOPS; | ||
1358 | |||
1359 | spin_lock_irqsave(&cpufreq_driver_lock, flags); | ||
1360 | if (cpufreq_driver) { | ||
1361 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | ||
1362 | return -EBUSY; | ||
1363 | } | ||
1364 | cpufreq_driver = driver_data; | ||
1365 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | ||
1366 | |||
1367 | ret = sysdev_driver_register(&cpu_sysdev_class,&cpufreq_sysdev_driver); | ||
1368 | |||
1369 | if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) { | ||
1370 | int i; | ||
1371 | ret = -ENODEV; | ||
1372 | |||
1373 | /* check for at least one working CPU */ | ||
1374 | for (i=0; i<NR_CPUS; i++) | ||
1375 | if (cpufreq_cpu_data[i]) | ||
1376 | ret = 0; | ||
1377 | |||
1378 | /* if all ->init() calls failed, unregister */ | ||
1379 | if (ret) { | ||
1380 | dprintk("no CPU initialized for driver %s\n", driver_data->name); | ||
1381 | sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver); | ||
1382 | |||
1383 | spin_lock_irqsave(&cpufreq_driver_lock, flags); | ||
1384 | cpufreq_driver = NULL; | ||
1385 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | ||
1386 | } | ||
1387 | } | ||
1388 | |||
1389 | if (!ret) { | ||
1390 | dprintk("driver %s up and running\n", driver_data->name); | ||
1391 | cpufreq_debug_enable_ratelimit(); | ||
1392 | } | ||
1393 | |||
1394 | return (ret); | ||
1395 | } | ||
1396 | EXPORT_SYMBOL_GPL(cpufreq_register_driver); | ||
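/*
 * Sketch (illustrative, not part of this file): the minimum a scaling driver
 * provides before calling cpufreq_register_driver() - an ->init() that fills
 * in the hardware limits and current frequency, a ->verify(), and either a
 * ->target() (see the sketch near cpufreq_notify_transition()) or a
 * ->setpolicy().  All names and numbers below are made up.
 */
static int example_cpufreq_init(struct cpufreq_policy *policy)
{
	policy->cpuinfo.min_freq = 800000;	/* kHz, illustrative */
	policy->cpuinfo.max_freq = 1600000;
	policy->min = policy->cpuinfo.min_freq;
	policy->max = policy->cpuinfo.max_freq;
	policy->cur = 1600000;			/* what the hardware runs at now */
	return 0;
}

static int example_cpufreq_verify(struct cpufreq_policy *policy)
{
	/* a real driver clamps policy->min/max to what the hardware supports */
	return 0;
}

static struct cpufreq_driver example_cpufreq_driver = {
	.name	= "example",
	.owner	= THIS_MODULE,
	.init	= example_cpufreq_init,
	.verify	= example_cpufreq_verify,
	.target	= example_driver_target,	/* the ->target() sketch above */
};

/* module init:
 *	return cpufreq_register_driver(&example_cpufreq_driver);
 */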
1397 | |||
1398 | |||
1399 | /** | ||
1400 | * cpufreq_unregister_driver - unregister the current CPUFreq driver | ||
1401 | * | ||
1402 | * Unregister the current CPUFreq driver. Only call this if you have | ||
1403 | * the right to do so, i.e. if you have succeeded in initialising before! | ||
1404 | * Returns zero if successful, and -EINVAL if the cpufreq_driver is | ||
1405 | * currently not initialised. | ||
1406 | */ | ||
1407 | int cpufreq_unregister_driver(struct cpufreq_driver *driver) | ||
1408 | { | ||
1409 | unsigned long flags; | ||
1410 | |||
1411 | cpufreq_debug_disable_ratelimit(); | ||
1412 | |||
1413 | if (!cpufreq_driver || (driver != cpufreq_driver)) { | ||
1414 | cpufreq_debug_enable_ratelimit(); | ||
1415 | return -EINVAL; | ||
1416 | } | ||
1417 | |||
1418 | dprintk("unregistering driver %s\n", driver->name); | ||
1419 | |||
1420 | sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver); | ||
1421 | |||
1422 | spin_lock_irqsave(&cpufreq_driver_lock, flags); | ||
1423 | cpufreq_driver = NULL; | ||
1424 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | ||
1425 | |||
1426 | return 0; | ||
1427 | } | ||
1428 | EXPORT_SYMBOL_GPL(cpufreq_unregister_driver); | ||