diff options
| author | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400 |
| commit | 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch) | |
| tree | 0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/cpufreq | |
Linux-2.6.12-rc2v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'drivers/cpufreq')
| -rw-r--r-- | drivers/cpufreq/Kconfig | 118 | ||||
| -rw-r--r-- | drivers/cpufreq/Makefile | 14 | ||||
| -rw-r--r-- | drivers/cpufreq/cpufreq.c | 1428 | ||||
| -rw-r--r-- | drivers/cpufreq/cpufreq_ondemand.c | 491 | ||||
| -rw-r--r-- | drivers/cpufreq/cpufreq_performance.c | 61 | ||||
| -rw-r--r-- | drivers/cpufreq/cpufreq_powersave.c | 59 | ||||
| -rw-r--r-- | drivers/cpufreq/cpufreq_stats.c | 334 | ||||
| -rw-r--r-- | drivers/cpufreq/cpufreq_userspace.c | 207 | ||||
| -rw-r--r-- | drivers/cpufreq/freq_table.c | 225 |
9 files changed, 2937 insertions, 0 deletions
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig new file mode 100644 index 000000000000..95882bb1950e --- /dev/null +++ b/drivers/cpufreq/Kconfig | |||
| @@ -0,0 +1,118 @@ | |||
# CPUfreq core, statistics and governor configuration.
# Textual fixes: "an userspace" -> "a userspace", "show detail ... table"
# -> "show the detailed ... table" in user-visible help text.

config CPU_FREQ
	bool "CPU Frequency scaling"
	help
	  CPU Frequency scaling allows you to change the clock speed of
	  CPUs on the fly. This is a nice method to save power, because
	  the lower the CPU clock speed, the less power the CPU consumes.

	  Note that this driver doesn't automatically change the CPU
	  clock speed, you need to either enable a dynamic cpufreq governor
	  (see below) after boot, or use a userspace tool.

	  For details, take a look at <file:Documentation/cpu-freq>.

	  If in doubt, say N.

if CPU_FREQ

config CPU_FREQ_TABLE
	def_tristate m

config CPU_FREQ_DEBUG
	bool "Enable CPUfreq debugging"
	help
	  Say Y here to enable CPUfreq subsystem (including drivers)
	  debugging. You will need to activate it via the kernel
	  command line by passing
	  cpufreq.debug=<value>

	  To get <value>, add
	   1 to activate CPUfreq core debugging,
	   2 to activate CPUfreq drivers debugging, and
	   4 to activate CPUfreq governor debugging

config CPU_FREQ_STAT
	tristate "CPU frequency translation statistics"
	select CPU_FREQ_TABLE
	default y
	help
	  This driver exports CPU frequency statistics information through sysfs
	  file system

config CPU_FREQ_STAT_DETAILS
	bool "CPU frequency translation statistics details"
	depends on CPU_FREQ_STAT
	help
	  This will show the detailed CPU frequency translation table in the
	  sysfs file system

choice
	prompt "Default CPUFreq governor"
	default CPU_FREQ_DEFAULT_GOV_USERSPACE if CPU_FREQ_SA1100 || CPU_FREQ_SA1110
	default CPU_FREQ_DEFAULT_GOV_PERFORMANCE
	help
	  This option sets which CPUFreq governor shall be loaded at
	  startup. If in doubt, select 'performance'.

config CPU_FREQ_DEFAULT_GOV_PERFORMANCE
	bool "performance"
	select CPU_FREQ_GOV_PERFORMANCE
	help
	  Use the CPUFreq governor 'performance' as default. This sets
	  the frequency statically to the highest frequency supported by
	  the CPU.

config CPU_FREQ_DEFAULT_GOV_USERSPACE
	bool "userspace"
	select CPU_FREQ_GOV_USERSPACE
	help
	  Use the CPUFreq governor 'userspace' as default. This allows
	  you to set the CPU frequency manually or when a userspace
	  program shall be able to set the CPU dynamically without having
	  to enable the userspace governor manually.

endchoice

config CPU_FREQ_GOV_PERFORMANCE
	tristate "'performance' governor"
	help
	  This cpufreq governor sets the frequency statically to the
	  highest available CPU frequency.

	  If in doubt, say Y.

config CPU_FREQ_GOV_POWERSAVE
	tristate "'powersave' governor"
	help
	  This cpufreq governor sets the frequency statically to the
	  lowest available CPU frequency.

	  If in doubt, say Y.

config CPU_FREQ_GOV_USERSPACE
	tristate "'userspace' governor for userspace frequency scaling"
	help
	  Enable this cpufreq governor when you either want to set the
	  CPU frequency manually or when a userspace program shall
	  be able to set the CPU dynamically, like on LART
	  <http://www.lart.tudelft.nl/>

	  For details, take a look at <file:Documentation/cpu-freq/>.

	  If in doubt, say Y.

config CPU_FREQ_GOV_ONDEMAND
	tristate "'ondemand' cpufreq policy governor"
	help
	  'ondemand' - This driver adds a dynamic cpufreq policy governor.
	  The governor does a periodic polling and
	  changes frequency based on the CPU utilization.
	  The support for this governor depends on CPU capability to
	  do fast frequency switching (i.e, very low latency frequency
	  transitions).

	  For details, take a look at linux/Documentation/cpu-freq.

	  If in doubt, say N.

endif	# CPU_FREQ
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile new file mode 100644 index 000000000000..67b16e5a41a7 --- /dev/null +++ b/drivers/cpufreq/Makefile | |||
| @@ -0,0 +1,14 @@ | |||
# Makefile for the CPUfreq subsystem (drivers/cpufreq).

# CPUfreq core
obj-$(CONFIG_CPU_FREQ)			+= cpufreq.o

# CPUfreq stats (sysfs frequency-translation statistics)
obj-$(CONFIG_CPU_FREQ_STAT)		+= cpufreq_stats.o

# CPUfreq governors
obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE)	+= cpufreq_performance.o
obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE)	+= cpufreq_powersave.o
obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE)	+= cpufreq_userspace.o
obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND)	+= cpufreq_ondemand.o

# CPUfreq cross-arch helpers (frequency table handling)
obj-$(CONFIG_CPU_FREQ_TABLE)		+= freq_table.o
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c new file mode 100644 index 000000000000..b30001f31610 --- /dev/null +++ b/drivers/cpufreq/cpufreq.c | |||
| @@ -0,0 +1,1428 @@ | |||
/*
 * linux/drivers/cpufreq/cpufreq.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/completion.h>

/* core-class debug output; active only with CONFIG_CPU_FREQ_DEBUG (below) */
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "cpufreq-core", msg)

/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static struct cpufreq_policy *cpufreq_cpu_data[NR_CPUS];
static DEFINE_SPINLOCK(cpufreq_driver_lock);


/* we keep a copy of all ->add'ed CPU's struct sys_device here;
 * as it is only accessed in ->add and ->remove, no lock or reference
 * count is necessary.
 */
static struct sys_device *cpu_sys_devices[NR_CPUS];


/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
static void handle_update(void *data);
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The rwsem below locks both lists.
 */
static struct notifier_block *cpufreq_policy_notifier_list;
static struct notifier_block *cpufreq_transition_notifier_list;
static DECLARE_RWSEM (cpufreq_notifier_rwsem);

/* registered governors, serialised by cpufreq_governor_sem */
static LIST_HEAD(cpufreq_governor_list);
static DECLARE_MUTEX (cpufreq_governor_sem);
| 66 | struct cpufreq_policy * cpufreq_cpu_get(unsigned int cpu) | ||
| 67 | { | ||
| 68 | struct cpufreq_policy *data; | ||
| 69 | unsigned long flags; | ||
| 70 | |||
| 71 | if (cpu >= NR_CPUS) | ||
| 72 | goto err_out; | ||
| 73 | |||
| 74 | /* get the cpufreq driver */ | ||
| 75 | spin_lock_irqsave(&cpufreq_driver_lock, flags); | ||
| 76 | |||
| 77 | if (!cpufreq_driver) | ||
| 78 | goto err_out_unlock; | ||
| 79 | |||
| 80 | if (!try_module_get(cpufreq_driver->owner)) | ||
| 81 | goto err_out_unlock; | ||
| 82 | |||
| 83 | |||
| 84 | /* get the CPU */ | ||
| 85 | data = cpufreq_cpu_data[cpu]; | ||
| 86 | |||
| 87 | if (!data) | ||
| 88 | goto err_out_put_module; | ||
| 89 | |||
| 90 | if (!kobject_get(&data->kobj)) | ||
| 91 | goto err_out_put_module; | ||
| 92 | |||
| 93 | |||
| 94 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | ||
| 95 | |||
| 96 | return data; | ||
| 97 | |||
| 98 | err_out_put_module: | ||
| 99 | module_put(cpufreq_driver->owner); | ||
| 100 | err_out_unlock: | ||
| 101 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | ||
| 102 | err_out: | ||
| 103 | return NULL; | ||
| 104 | } | ||
| 105 | EXPORT_SYMBOL_GPL(cpufreq_cpu_get); | ||
| 106 | |||
/**
 * cpufreq_cpu_put - release the references taken by cpufreq_cpu_get()
 * @data: policy previously returned by cpufreq_cpu_get()
 *
 * Drops the policy kobject reference, then the driver module reference.
 */
void cpufreq_cpu_put(struct cpufreq_policy *data)
{
	kobject_put(&data->kobj);
	module_put(cpufreq_driver->owner);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
| 113 | |||
| 114 | |||
/*********************************************************************
 *                      UNIFIED DEBUG HELPERS                        *
 *********************************************************************/
#ifdef CONFIG_CPU_FREQ_DEBUG

/* what part(s) of the CPUfreq subsystem are debugged? */
static unsigned int debug;

/* is the debug output ratelimit'ed using printk_ratelimit? User can
 * set or modify this value.
 */
static unsigned int debug_ratelimit = 1;

/* is the printk_ratelimit'ing enabled? It's enabled after a successful
 * loading of a cpufreq driver, temporarily disabled when a new policy
 * is set, and disabled upon cpufreq driver removal
 */
static unsigned int disable_ratelimit = 1;
static DEFINE_SPINLOCK(disable_ratelimit_lock);

/* re-arm ratelimiting: decrement the disable count (never below zero) */
static inline void cpufreq_debug_enable_ratelimit(void)
{
	unsigned long flags;

	spin_lock_irqsave(&disable_ratelimit_lock, flags);
	if (disable_ratelimit)
		disable_ratelimit--;
	spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
}

/* suspend ratelimiting: increment the disable count */
static inline void cpufreq_debug_disable_ratelimit(void)
{
	unsigned long flags;

	spin_lock_irqsave(&disable_ratelimit_lock, flags);
	disable_ratelimit++;
	spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
}

/**
 * cpufreq_debug_printk - conditionally emit a ratelimited debug message
 * @type: CPUFREQ_DEBUG_* class bit (core / driver / governor)
 * @prefix: tag prepended to the message, e.g. "cpufreq-core"
 * @fmt: printf-style format plus arguments
 *
 * Prints only when @type is enabled in the "debug" module parameter and,
 * unless ratelimiting is disabled, when printk_ratelimit() allows it.
 */
void cpufreq_debug_printk(unsigned int type, const char *prefix, const char *fmt, ...)
{
	char s[256];
	va_list args;
	unsigned int len;
	unsigned long flags;

	WARN_ON(!prefix);
	if (type & debug) {
		spin_lock_irqsave(&disable_ratelimit_lock, flags);
		if (!disable_ratelimit && debug_ratelimit && !printk_ratelimit()) {
			spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&disable_ratelimit_lock, flags);

		len = snprintf(s, sizeof(s), KERN_DEBUG "%s: ", prefix);

		va_start(args, fmt);
		len += vsnprintf(&s[len], (sizeof(s) - len), fmt, args);
		va_end(args);

		/* Fix: print through "%s" instead of printk(s), so '%'
		 * characters produced by the formatted arguments cannot be
		 * re-interpreted as conversion specifiers (format-string bug).
		 */
		printk("%s", s);

		WARN_ON(len < 5);
	}
}
EXPORT_SYMBOL(cpufreq_debug_printk);


module_param(debug, uint, 0644);
MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core, 2 to debug drivers, and 4 to debug governors.");

module_param(debug_ratelimit, uint, 0644);
MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging: set to 0 to disable ratelimiting.");

#else /* !CONFIG_CPU_FREQ_DEBUG */

/* debugging disabled: the ratelimit toggles compile away to nothing */
static inline void cpufreq_debug_enable_ratelimit(void) { return; }
static inline void cpufreq_debug_disable_ratelimit(void) { return; }

#endif /* CONFIG_CPU_FREQ_DEBUG */
| 196 | |||
| 197 | |||
/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;		/* loops_per_jiffy at the reference frequency */
static unsigned int  l_p_j_ref_freq;	/* reference frequency in kHz; 0 = not yet captured */

static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	/* drivers flagging CPUFREQ_CONST_LOOPS keep delay loops constant */
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	/* capture the reference (loops_per_jiffy, freq) pair on the
	 * very first transition */
	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		dprintk("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
	}
	/* scale before an increase and after a decrease (and always on
	 * resume) - presumably so delay loops are never run with a
	 * loops_per_jiffy that is too small for the current speed */
	if ((val == CPUFREQ_PRECHANGE  && ci->old < ci->new) ||
	    (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq, ci->new);
		dprintk("scaling loops_per_jiffy to %lu for frequency %u kHz\n", loops_per_jiffy, ci->new);
	}
}
#else
/* SMP: per-CPU loops_per_jiffy is the architecture's business */
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) { return; }
#endif
| 235 | |||
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies on frequency transition
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects: once with CPUFREQ_PRECHANGE before the driver
 * changes the frequency, once with CPUFREQ_POSTCHANGE afterwards.
 */
void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
{
	/* notifiers may sleep; must not be called with interrupts off */
	BUG_ON(irqs_disabled());

	freqs->flags = cpufreq_driver->flags;
	dprintk("notification %u of frequency transition to %u kHz\n", state, freqs->new);

	down_read(&cpufreq_notifier_rwsem);
	switch (state) {
	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency" which
		 * is not equal to what the cpufreq core thinks is "old frequency".
		 * If so, trust the core's value and warn.
		 * NOTE(review): cpufreq_cpu_data is read here without taking
		 * cpufreq_driver_lock - confirm this is safe against
		 * concurrent add/remove.
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((likely(cpufreq_cpu_data[freqs->cpu])) &&
			    (likely(cpufreq_cpu_data[freqs->cpu]->cpu == freqs->cpu)) &&
			    (likely(cpufreq_cpu_data[freqs->cpu]->cur)) &&
			    (unlikely(freqs->old != cpufreq_cpu_data[freqs->cpu]->cur)))
			{
				printk(KERN_WARNING "Warning: CPU frequency is %u, "
				       "cpufreq assumed %u kHz.\n", freqs->old, cpufreq_cpu_data[freqs->cpu]->cur);
				freqs->old = cpufreq_cpu_data[freqs->cpu]->cur;
			}
		}
		/* pre-change: notify first, then scale loops_per_jiffy */
		notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;
	case CPUFREQ_POSTCHANGE:
		/* post-change: mirrored order - scale first, then notify */
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_POSTCHANGE, freqs);
		/* record the new frequency as the core's view of "cur" */
		if ((likely(cpufreq_cpu_data[freqs->cpu])) &&
		    (likely(cpufreq_cpu_data[freqs->cpu]->cpu == freqs->cpu)))
			cpufreq_cpu_data[freqs->cpu]->cur = freqs->new;
		break;
	}
	up_read(&cpufreq_notifier_rwsem);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
| 280 | |||
| 281 | |||
| 282 | |||
| 283 | /********************************************************************* | ||
| 284 | * SYSFS INTERFACE * | ||
| 285 | *********************************************************************/ | ||
| 286 | |||
| 287 | /** | ||
| 288 | * cpufreq_parse_governor - parse a governor string | ||
| 289 | */ | ||
| 290 | static int cpufreq_parse_governor (char *str_governor, unsigned int *policy, | ||
| 291 | struct cpufreq_governor **governor) | ||
| 292 | { | ||
| 293 | if (!cpufreq_driver) | ||
| 294 | return -EINVAL; | ||
| 295 | if (cpufreq_driver->setpolicy) { | ||
| 296 | if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) { | ||
| 297 | *policy = CPUFREQ_POLICY_PERFORMANCE; | ||
| 298 | return 0; | ||
| 299 | } else if (!strnicmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) { | ||
| 300 | *policy = CPUFREQ_POLICY_POWERSAVE; | ||
| 301 | return 0; | ||
| 302 | } | ||
| 303 | return -EINVAL; | ||
| 304 | } else { | ||
| 305 | struct cpufreq_governor *t; | ||
| 306 | down(&cpufreq_governor_sem); | ||
| 307 | if (!cpufreq_driver || !cpufreq_driver->target) | ||
| 308 | goto out; | ||
| 309 | list_for_each_entry(t, &cpufreq_governor_list, governor_list) { | ||
| 310 | if (!strnicmp(str_governor,t->name,CPUFREQ_NAME_LEN)) { | ||
| 311 | *governor = t; | ||
| 312 | up(&cpufreq_governor_sem); | ||
| 313 | return 0; | ||
| 314 | } | ||
| 315 | } | ||
| 316 | out: | ||
| 317 | up(&cpufreq_governor_sem); | ||
| 318 | } | ||
| 319 | return -EINVAL; | ||
| 320 | } | ||
| 321 | EXPORT_SYMBOL_GPL(cpufreq_parse_governor); | ||
| 322 | |||
| 323 | |||
/* drivers/base/cpu.c */
extern struct sysdev_class cpu_sysdev_class;


/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() - print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name 			\
(struct cpufreq_policy * policy, char *buf)		\
{							\
	return sprintf (buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 *
 * Parses an unsigned int from @buf into the named policy field and
 * applies it via cpufreq_set_policy().
 *
 * NOTE(review): "ret" is unsigned int, so a negative error code from
 * cpufreq_set_policy() is returned as a large positive ssize_t on
 * 64-bit systems - verify and consider declaring it as int.
 */
#define store_one(file_name, object)			\
static ssize_t store_##file_name			\
(struct cpufreq_policy * policy, const char *buf, size_t count)		\
{							\
	unsigned int ret = -EINVAL;			\
	struct cpufreq_policy new_policy;		\
							\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)					\
		return -EINVAL;				\
							\
	ret = sscanf (buf, "%u", &new_policy.object);	\
	if (ret != 1)					\
		return -EINVAL;				\
							\
	ret = cpufreq_set_policy(&new_policy);		\
							\
	return ret ? ret : count;			\
}

store_one(scaling_min_freq,min);
store_one(scaling_max_freq,max);
| 373 | |||
| 374 | /** | ||
| 375 | * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware | ||
| 376 | */ | ||
| 377 | static ssize_t show_cpuinfo_cur_freq (struct cpufreq_policy * policy, char *buf) | ||
| 378 | { | ||
| 379 | unsigned int cur_freq = cpufreq_get(policy->cpu); | ||
| 380 | if (!cur_freq) | ||
| 381 | return sprintf(buf, "<unknown>"); | ||
| 382 | return sprintf(buf, "%u\n", cur_freq); | ||
| 383 | } | ||
| 384 | |||
| 385 | |||
| 386 | /** | ||
| 387 | * show_scaling_governor - show the current policy for the specified CPU | ||
| 388 | */ | ||
| 389 | static ssize_t show_scaling_governor (struct cpufreq_policy * policy, char *buf) | ||
| 390 | { | ||
| 391 | if(policy->policy == CPUFREQ_POLICY_POWERSAVE) | ||
| 392 | return sprintf(buf, "powersave\n"); | ||
| 393 | else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) | ||
| 394 | return sprintf(buf, "performance\n"); | ||
| 395 | else if (policy->governor) | ||
| 396 | return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", policy->governor->name); | ||
| 397 | return -EINVAL; | ||
| 398 | } | ||
| 399 | |||
| 400 | |||
| 401 | /** | ||
| 402 | * store_scaling_governor - store policy for the specified CPU | ||
| 403 | */ | ||
| 404 | static ssize_t store_scaling_governor (struct cpufreq_policy * policy, | ||
| 405 | const char *buf, size_t count) | ||
| 406 | { | ||
| 407 | unsigned int ret = -EINVAL; | ||
| 408 | char str_governor[16]; | ||
| 409 | struct cpufreq_policy new_policy; | ||
| 410 | |||
| 411 | ret = cpufreq_get_policy(&new_policy, policy->cpu); | ||
| 412 | if (ret) | ||
| 413 | return ret; | ||
| 414 | |||
| 415 | ret = sscanf (buf, "%15s", str_governor); | ||
| 416 | if (ret != 1) | ||
| 417 | return -EINVAL; | ||
| 418 | |||
| 419 | if (cpufreq_parse_governor(str_governor, &new_policy.policy, &new_policy.governor)) | ||
| 420 | return -EINVAL; | ||
| 421 | |||
| 422 | ret = cpufreq_set_policy(&new_policy); | ||
| 423 | |||
| 424 | return ret ? ret : count; | ||
| 425 | } | ||
| 426 | |||
/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver (struct cpufreq_policy * policy, char *buf)
{
	/* driver names are bounded by CPUFREQ_NAME_LEN */
	return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name);
}
| 434 | |||
| 435 | /** | ||
| 436 | * show_scaling_available_governors - show the available CPUfreq governors | ||
| 437 | */ | ||
| 438 | static ssize_t show_scaling_available_governors (struct cpufreq_policy * policy, | ||
| 439 | char *buf) | ||
| 440 | { | ||
| 441 | ssize_t i = 0; | ||
| 442 | struct cpufreq_governor *t; | ||
| 443 | |||
| 444 | if (!cpufreq_driver->target) { | ||
| 445 | i += sprintf(buf, "performance powersave"); | ||
| 446 | goto out; | ||
| 447 | } | ||
| 448 | |||
| 449 | list_for_each_entry(t, &cpufreq_governor_list, governor_list) { | ||
| 450 | if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) - (CPUFREQ_NAME_LEN + 2))) | ||
| 451 | goto out; | ||
| 452 | i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name); | ||
| 453 | } | ||
| 454 | out: | ||
| 455 | i += sprintf(&buf[i], "\n"); | ||
| 456 | return i; | ||
| 457 | } | ||
| 458 | /** | ||
| 459 | * show_affected_cpus - show the CPUs affected by each transition | ||
| 460 | */ | ||
| 461 | static ssize_t show_affected_cpus (struct cpufreq_policy * policy, char *buf) | ||
| 462 | { | ||
| 463 | ssize_t i = 0; | ||
| 464 | unsigned int cpu; | ||
| 465 | |||
| 466 | for_each_cpu_mask(cpu, policy->cpus) { | ||
| 467 | if (i) | ||
| 468 | i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " "); | ||
| 469 | i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu); | ||
| 470 | if (i >= (PAGE_SIZE - 5)) | ||
| 471 | break; | ||
| 472 | } | ||
| 473 | i += sprintf(&buf[i], "\n"); | ||
| 474 | return i; | ||
| 475 | } | ||
| 476 | |||
| 477 | |||
/* helpers declaring sysfs attributes: world-readable (0444),
 * root-read-only (0400) and read-write (0644); each expects matching
 * show_<name>() / store_<name>() functions defined above. */
#define define_one_ro(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)

#define define_one_ro0400(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0400, show_##_name, NULL)

#define define_one_rw(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

/* cpuinfo_cur_freq is root-only (0400) */
define_one_ro0400(cpuinfo_cur_freq);
define_one_ro(cpuinfo_min_freq);
define_one_ro(cpuinfo_max_freq);
define_one_ro(scaling_available_governors);
define_one_ro(scaling_driver);
define_one_ro(scaling_cur_freq);
define_one_ro(affected_cpus);
define_one_rw(scaling_min_freq);
define_one_rw(scaling_max_freq);
define_one_rw(scaling_governor);

/* attributes created for every policy kobject; cpuinfo_cur_freq and
 * scaling_cur_freq are added conditionally in cpufreq_add_dev() */
static struct attribute * default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	NULL
};
| 512 | |||
#define to_policy(k) container_of(k,struct cpufreq_policy,kobj)
#define to_attr(a) container_of(a,struct freq_attr,attr)

/* sysfs read dispatcher: pin the policy via cpufreq_cpu_get(), invoke
 * the attribute's show() hook, drop the reference again */
static ssize_t show(struct kobject * kobj, struct attribute * attr ,char * buf)
{
	struct cpufreq_policy * policy = to_policy(kobj);
	struct freq_attr * fattr = to_attr(attr);
	ssize_t ret;
	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		return -EINVAL;
	ret = fattr->show ? fattr->show(policy,buf) : 0;
	cpufreq_cpu_put(policy);
	return ret;
}

/* sysfs write dispatcher, mirror image of show() above */
static ssize_t store(struct kobject * kobj, struct attribute * attr,
		     const char * buf, size_t count)
{
	struct cpufreq_policy * policy = to_policy(kobj);
	struct freq_attr * fattr = to_attr(attr);
	ssize_t ret;
	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		return -EINVAL;
	ret = fattr->store ? fattr->store(policy,buf,count) : 0;
	cpufreq_cpu_put(policy);
	return ret;
}

/* kobject release: signal kobj_unregister so whoever is blocked in
 * wait_for_completion() knows the last reference is gone; the policy
 * itself is freed by that waiter, not here */
static void cpufreq_sysfs_release(struct kobject * kobj)
{
	struct cpufreq_policy * policy = to_policy(kobj);
	dprintk("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};
| 560 | |||
| 561 | |||
/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device: allocates the policy,
 * lets the driver initialise it, registers the sysfs kobject and files,
 * publishes the policy in cpufreq_cpu_data[] and applies the default
 * policy.  Returns 0 on success or a negative error code.
 */
static int cpufreq_add_dev (struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	int ret = 0;
	struct cpufreq_policy new_policy;
	struct cpufreq_policy *policy;
	struct freq_attr **drv_attr;
	unsigned long flags;
	unsigned int j;

	/* suppress debug ratelimiting while (re)configuring */
	cpufreq_debug_disable_ratelimit();
	dprintk("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat; if so, just link to the
	 * existing policy's kobject instead of creating a new one.
	 * Note the cpufreq_cpu_get() reference is intentionally kept. */
	policy = cpufreq_cpu_get(cpu);
	if (unlikely(policy)) {
		cpu_sys_devices[cpu] = sys_dev;
		dprintk("CPU already managed, adding link\n");
		sysfs_create_link(&sys_dev->kobj, &policy->kobj, "cpufreq");
		cpufreq_debug_enable_ratelimit();
		return 0;
	}
#endif

	/* pin the driver module for the duration of the setup */
	if (!try_module_get(cpufreq_driver->owner)) {
		ret = -EINVAL;
		goto module_out;
	}

	policy = kmalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
	if (!policy) {
		ret = -ENOMEM;
		goto nomem_out;
	}
	memset(policy, 0, sizeof(struct cpufreq_policy));

	policy->cpu = cpu;
	policy->cpus = cpumask_of_cpu(cpu);

	/* lock starts held; released (up) only after the policy has been
	 * published in cpufreq_cpu_data[] below */
	init_MUTEX_LOCKED(&policy->lock);
	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update, (void *)(long)cpu);

	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		dprintk("initialization failed\n");
		goto err_out;
	}

	/* snapshot the driver-initialised limits as the default policy */
	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));

	/* prepare interface data */
	policy->kobj.parent = &sys_dev->kobj;
	policy->kobj.ktype = &ktype_cpufreq;
	strlcpy(policy->kobj.name, "cpufreq", KOBJ_NAME_LEN);

	ret = kobject_register(&policy->kobj);
	if (ret)
		goto err_out;

	/* set up files for this cpu device: driver-specific attributes
	 * first, then the conditional cur_freq files */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		drv_attr++;
	}
	if (cpufreq_driver->get)
		sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
	if (cpufreq_driver->target)
		sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);

	/* publish the policy for every CPU it covers */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu_mask(j, policy->cpus)
		cpufreq_cpu_data[j] = policy;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
	policy->governor = NULL; /* to assure that the starting sequence is
				  * run in cpufreq_set_policy */
	up(&policy->lock);

	/* set default policy */

	ret = cpufreq_set_policy(&new_policy);
	if (ret) {
		dprintk("setting policy failed\n");
		goto err_out_unregister;
	}

	module_put(cpufreq_driver->owner);
	cpu_sys_devices[cpu] = sys_dev;
	dprintk("initialization complete\n");
	cpufreq_debug_enable_ratelimit();

	return 0;


	/* unwind in reverse order of setup.
	 * NOTE(review): no cpufreq_driver->exit() is invoked on these
	 * error paths even though ->init() succeeded - verify whether
	 * drivers expect a matching exit call here. */
err_out_unregister:
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu_mask(j, policy->cpus)
		cpufreq_cpu_data[j] = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* release the kobject and wait until cpufreq_sysfs_release()
	 * signals that the last reference is gone */
	kobject_unregister(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);

err_out:
	kfree(policy);

nomem_out:
	module_put(cpufreq_driver->owner);
module_out:
	cpufreq_debug_enable_ratelimit();
	return ret;
}
| 685 | |||
| 686 | |||
/**
 *	cpufreq_remove_dev - remove a CPU device
 *
 *	Removes the cpufreq interface for a CPU device.
 *	Tears down the sysfs files/links, stops the governor, waits for the
 *	kobject refcount to drop, then lets the driver clean up and frees
 *	the policy.
 */
static int cpufreq_remove_dev (struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long flags;
	struct cpufreq_policy *data;
#ifdef CONFIG_SMP
	unsigned int j;
#endif

	cpufreq_debug_disable_ratelimit();
	dprintk("unregistering CPU %u\n", cpu);

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	data = cpufreq_cpu_data[cpu];

	if (!data) {
		/* never registered, or already torn down */
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		cpu_sys_devices[cpu] = NULL;
		cpufreq_debug_enable_ratelimit();
		return -EINVAL;
	}
	cpufreq_cpu_data[cpu] = NULL;


#ifdef CONFIG_SMP
	/* if this isn't the CPU which is the parent of the kobj, we
	 * only need to unlink, put and exit
	 */
	if (unlikely(cpu != data->cpu)) {
		dprintk("removing link\n");
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		sysfs_remove_link(&sys_dev->kobj, "cpufreq");
		cpu_sys_devices[cpu] = NULL;
		/* balances the cpufreq_cpu_get() done when the link was added */
		cpufreq_cpu_put(data);
		cpufreq_debug_enable_ratelimit();
		return 0;
	}
#endif

	cpu_sys_devices[cpu] = NULL;

	if (!kobject_get(&data->kobj)) {
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		cpufreq_debug_enable_ratelimit();
		return -EFAULT;
	}

#ifdef CONFIG_SMP
	/* if we have other CPUs still registered, we need to unlink them,
	 * or else wait_for_completion below will lock up. Clean the
	 * cpufreq_cpu_data[] while holding the lock, and remove the sysfs
	 * links afterwards.
	 */
	if (unlikely(cpus_weight(data->cpus) > 1)) {
		for_each_cpu_mask(j, data->cpus) {
			if (j == cpu)
				continue;
			cpufreq_cpu_data[j] = NULL;
		}
	}

	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* second pass, spinlock dropped -- presumably because removing the
	 * sysfs links may sleep; confirm against sysfs_remove_link() */
	if (unlikely(cpus_weight(data->cpus) > 1)) {
		for_each_cpu_mask(j, data->cpus) {
			if (j == cpu)
				continue;
			dprintk("removing link for cpu %u\n", j);
			sysfs_remove_link(&cpu_sys_devices[j]->kobj, "cpufreq");
			cpufreq_cpu_put(data);
		}
	}
#else
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif

	down(&data->lock);
	if (cpufreq_driver->target)
		__cpufreq_governor(data, CPUFREQ_GOV_STOP);
	/* NOTE(review): this clears the *driver-global* ->target callback,
	 * not a per-policy field, so after removing one CPU no other CPU
	 * can be retargeted any more. Looks like a bug -- confirm intent. */
	cpufreq_driver->target = NULL;
	up(&data->lock);

	kobject_unregister(&data->kobj);

	kobject_put(&data->kobj);

	/* we need to make sure that the underlying kobj is actually
	 * not referenced anymore by anybody before we proceed with
	 * unloading.
	 */
	dprintk("waiting for dropping of refcount\n");
	wait_for_completion(&data->kobj_unregister);
	dprintk("wait complete\n");

	if (cpufreq_driver->exit)
		cpufreq_driver->exit(data);

	kfree(data);

	cpufreq_debug_enable_ratelimit();

	return 0;
}
| 795 | |||
| 796 | |||
/* Deferred-work handler: re-evaluate the policy of the CPU whose number
 * was packed into @data when the work item was initialized. */
static void handle_update(void *data)
{
	unsigned int target_cpu;

	target_cpu = (unsigned int)(long)data;
	dprintk("handle_update for cpu %u called\n", target_cpu);
	cpufreq_update_policy(target_cpu);
}
| 803 | |||
| 804 | /** | ||
| 805 | * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble. | ||
| 806 | * @cpu: cpu number | ||
| 807 | * @old_freq: CPU frequency the kernel thinks the CPU runs at | ||
| 808 | * @new_freq: CPU frequency the CPU actually runs at | ||
| 809 | * | ||
| 810 | * We adjust to current frequency first, and need to clean up later. So either call | ||
| 811 | * to cpufreq_update_policy() or schedule handle_update()). | ||
| 812 | */ | ||
| 813 | static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, unsigned int new_freq) | ||
| 814 | { | ||
| 815 | struct cpufreq_freqs freqs; | ||
| 816 | |||
| 817 | printk(KERN_WARNING "Warning: CPU frequency out of sync: cpufreq and timing " | ||
| 818 | "core thinks of %u, is %u kHz.\n", old_freq, new_freq); | ||
| 819 | |||
| 820 | freqs.cpu = cpu; | ||
| 821 | freqs.old = old_freq; | ||
| 822 | freqs.new = new_freq; | ||
| 823 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | ||
| 824 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | ||
| 825 | } | ||
| 826 | |||
| 827 | |||
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency.
 * Returns 0 when the policy is unknown or the driver has no ->get hook.
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);	/* takes a refcount */
	unsigned int ret = 0;

	if (!policy)
		return 0;

	if (!cpufreq_driver->get)
		goto out;	/* nothing to query, just drop the refcount */

	down(&policy->lock);

	ret = cpufreq_driver->get(cpu);

	if (ret && policy->cur && !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS))
	{
		/* verify no discrepancy between actual and saved value exists */
		if (unlikely(ret != policy->cur)) {
			cpufreq_out_of_sync(cpu, policy->cur, ret);
			/* defer the cpufreq_update_policy() cleanup to
			 * process context via the policy's work item */
			schedule_work(&policy->update);
		}
	}

	up(&policy->lock);

	out:
	cpufreq_cpu_put(policy);

	return (ret);
}
EXPORT_SYMBOL(cpufreq_get);
| 866 | |||
| 867 | |||
/**
 *	cpufreq_resume - restore proper CPU frequency handling after resume
 *
 *	1.) resume CPUfreq hardware support (cpufreq_driver->resume())
 *	2.) if ->target and !CPUFREQ_CONST_LOOPS: verify we're in sync
 *	3.) schedule call cpufreq_update_policy() ASAP as interrupts are restored.
 */
static int cpufreq_resume(struct sys_device * sysdev)
{
	int cpu = sysdev->id;
	unsigned int ret = 0;
	struct cpufreq_policy *cpu_policy;

	dprintk("resuming cpu %u\n", cpu);

	if (!cpu_online(cpu))
		return 0;

	/* we may be lax here as interrupts are off. Nonetheless
	 * we need to grab the correct cpu policy, as to check
	 * whether we really run on this CPU.
	 */

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	/* only handle each CPU group once: the CPU that owns the policy
	 * (cpu_policy->cpu) does the work for all CPUs sharing it */
	if (unlikely(cpu_policy->cpu != cpu)) {
		cpufreq_cpu_put(cpu_policy);
		return 0;
	}

	if (cpufreq_driver->resume) {
		ret = cpufreq_driver->resume(cpu_policy);
		if (ret) {
			printk(KERN_ERR "cpufreq: resume failed in ->resume "
					"step on CPU %u\n", cpu_policy->cpu);
			cpufreq_cpu_put(cpu_policy);
			return ret;
		}
	}

	if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		unsigned int cur_freq = 0;

		if (cpufreq_driver->get)
			cur_freq = cpufreq_driver->get(cpu_policy->cpu);

		if (!cur_freq || !cpu_policy->cur) {
			printk(KERN_ERR "cpufreq: resume failed to assert current frequency is what timing core thinks it is.\n");
			goto out;
		}

		if (unlikely(cur_freq != cpu_policy->cur)) {
			struct cpufreq_freqs freqs;

			printk(KERN_WARNING "Warning: CPU frequency is %u, "
			       "cpufreq assumed %u kHz.\n", cur_freq, cpu_policy->cur);

			freqs.cpu = cpu;
			freqs.old = cpu_policy->cur;
			freqs.new = cur_freq;

			/* RESUMECHANGE event instead of a normal
			 * PRECHANGE/POSTCHANGE pair, then fix up jiffies */
			notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_RESUMECHANGE, &freqs);
			adjust_jiffies(CPUFREQ_RESUMECHANGE, &freqs);

			cpu_policy->cur = cur_freq;
		}
	}

out:
	/* full policy re-evaluation happens later, in process context */
	schedule_work(&cpu_policy->update);
	cpufreq_cpu_put(cpu_policy);
	return ret;
}
| 944 | |||
/* Hooks cpufreq into the CPU sysdev class: per-CPU add/remove and resume. */
static struct sysdev_driver cpufreq_sysdev_driver = {
	.add		= cpufreq_add_dev,
	.remove		= cpufreq_remove_dev,
	.resume		= cpufreq_resume,
};
| 950 | |||
| 951 | |||
| 952 | /********************************************************************* | ||
| 953 | * NOTIFIER LISTS INTERFACE * | ||
| 954 | *********************************************************************/ | ||
| 955 | |||
| 956 | /** | ||
| 957 | * cpufreq_register_notifier - register a driver with cpufreq | ||
| 958 | * @nb: notifier function to register | ||
| 959 | * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER | ||
| 960 | * | ||
| 961 | * Add a driver to one of two lists: either a list of drivers that | ||
| 962 | * are notified about clock rate changes (once before and once after | ||
| 963 | * the transition), or a list of drivers that are notified about | ||
| 964 | * changes in cpufreq policy. | ||
| 965 | * | ||
| 966 | * This function may sleep, and has the same return conditions as | ||
| 967 | * notifier_chain_register. | ||
| 968 | */ | ||
| 969 | int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list) | ||
| 970 | { | ||
| 971 | int ret; | ||
| 972 | |||
| 973 | down_write(&cpufreq_notifier_rwsem); | ||
| 974 | switch (list) { | ||
| 975 | case CPUFREQ_TRANSITION_NOTIFIER: | ||
| 976 | ret = notifier_chain_register(&cpufreq_transition_notifier_list, nb); | ||
| 977 | break; | ||
| 978 | case CPUFREQ_POLICY_NOTIFIER: | ||
| 979 | ret = notifier_chain_register(&cpufreq_policy_notifier_list, nb); | ||
| 980 | break; | ||
| 981 | default: | ||
| 982 | ret = -EINVAL; | ||
| 983 | } | ||
| 984 | up_write(&cpufreq_notifier_rwsem); | ||
| 985 | |||
| 986 | return ret; | ||
| 987 | } | ||
| 988 | EXPORT_SYMBOL(cpufreq_register_notifier); | ||
| 989 | |||
| 990 | |||
| 991 | /** | ||
| 992 | * cpufreq_unregister_notifier - unregister a driver with cpufreq | ||
| 993 | * @nb: notifier block to be unregistered | ||
| 994 | * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER | ||
| 995 | * | ||
| 996 | * Remove a driver from the CPU frequency notifier list. | ||
| 997 | * | ||
| 998 | * This function may sleep, and has the same return conditions as | ||
| 999 | * notifier_chain_unregister. | ||
| 1000 | */ | ||
| 1001 | int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list) | ||
| 1002 | { | ||
| 1003 | int ret; | ||
| 1004 | |||
| 1005 | down_write(&cpufreq_notifier_rwsem); | ||
| 1006 | switch (list) { | ||
| 1007 | case CPUFREQ_TRANSITION_NOTIFIER: | ||
| 1008 | ret = notifier_chain_unregister(&cpufreq_transition_notifier_list, nb); | ||
| 1009 | break; | ||
| 1010 | case CPUFREQ_POLICY_NOTIFIER: | ||
| 1011 | ret = notifier_chain_unregister(&cpufreq_policy_notifier_list, nb); | ||
| 1012 | break; | ||
| 1013 | default: | ||
| 1014 | ret = -EINVAL; | ||
| 1015 | } | ||
| 1016 | up_write(&cpufreq_notifier_rwsem); | ||
| 1017 | |||
| 1018 | return ret; | ||
| 1019 | } | ||
| 1020 | EXPORT_SYMBOL(cpufreq_unregister_notifier); | ||
| 1021 | |||
| 1022 | |||
| 1023 | /********************************************************************* | ||
| 1024 | * GOVERNORS * | ||
| 1025 | *********************************************************************/ | ||
| 1026 | |||
| 1027 | |||
| 1028 | int __cpufreq_driver_target(struct cpufreq_policy *policy, | ||
| 1029 | unsigned int target_freq, | ||
| 1030 | unsigned int relation) | ||
| 1031 | { | ||
| 1032 | int retval = -EINVAL; | ||
| 1033 | lock_cpu_hotplug(); | ||
| 1034 | dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu, | ||
| 1035 | target_freq, relation); | ||
| 1036 | if (cpu_online(policy->cpu) && cpufreq_driver->target) | ||
| 1037 | retval = cpufreq_driver->target(policy, target_freq, relation); | ||
| 1038 | unlock_cpu_hotplug(); | ||
| 1039 | return retval; | ||
| 1040 | } | ||
| 1041 | EXPORT_SYMBOL_GPL(__cpufreq_driver_target); | ||
| 1042 | |||
| 1043 | |||
| 1044 | int cpufreq_driver_target(struct cpufreq_policy *policy, | ||
| 1045 | unsigned int target_freq, | ||
| 1046 | unsigned int relation) | ||
| 1047 | { | ||
| 1048 | unsigned int ret; | ||
| 1049 | |||
| 1050 | policy = cpufreq_cpu_get(policy->cpu); | ||
| 1051 | if (!policy) | ||
| 1052 | return -EINVAL; | ||
| 1053 | |||
| 1054 | down(&policy->lock); | ||
| 1055 | |||
| 1056 | ret = __cpufreq_driver_target(policy, target_freq, relation); | ||
| 1057 | |||
| 1058 | up(&policy->lock); | ||
| 1059 | |||
| 1060 | cpufreq_cpu_put(policy); | ||
| 1061 | |||
| 1062 | return ret; | ||
| 1063 | } | ||
| 1064 | EXPORT_SYMBOL_GPL(cpufreq_driver_target); | ||
| 1065 | |||
| 1066 | |||
/* Deliver @event to the policy's governor, balancing the governor module
 * refcount: a successful GOV_START leaves one extra reference pinned (so
 * the module cannot unload while governing), and a successful GOV_STOP
 * drops that extra reference again. All other cases are net-zero. */
static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
{
	int ret = -EINVAL;

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	dprintk("__cpufreq_governor for CPU %u, event %u\n", policy->cpu, event);
	ret = policy->governor->governor(policy, event);

	/* we keep one module reference alive for each CPU governed by this CPU */
	if ((event != CPUFREQ_GOV_START) || ret)
		module_put(policy->governor->owner);
	if ((event == CPUFREQ_GOV_STOP) && !ret)
		module_put(policy->governor->owner);

	return ret;
}
| 1085 | |||
| 1086 | |||
| 1087 | int cpufreq_governor(unsigned int cpu, unsigned int event) | ||
| 1088 | { | ||
| 1089 | int ret = 0; | ||
| 1090 | struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); | ||
| 1091 | |||
| 1092 | if (!policy) | ||
| 1093 | return -EINVAL; | ||
| 1094 | |||
| 1095 | down(&policy->lock); | ||
| 1096 | ret = __cpufreq_governor(policy, event); | ||
| 1097 | up(&policy->lock); | ||
| 1098 | |||
| 1099 | cpufreq_cpu_put(policy); | ||
| 1100 | |||
| 1101 | return ret; | ||
| 1102 | } | ||
| 1103 | EXPORT_SYMBOL_GPL(cpufreq_governor); | ||
| 1104 | |||
| 1105 | |||
| 1106 | int cpufreq_register_governor(struct cpufreq_governor *governor) | ||
| 1107 | { | ||
| 1108 | struct cpufreq_governor *t; | ||
| 1109 | |||
| 1110 | if (!governor) | ||
| 1111 | return -EINVAL; | ||
| 1112 | |||
| 1113 | down(&cpufreq_governor_sem); | ||
| 1114 | |||
| 1115 | list_for_each_entry(t, &cpufreq_governor_list, governor_list) { | ||
| 1116 | if (!strnicmp(governor->name,t->name,CPUFREQ_NAME_LEN)) { | ||
| 1117 | up(&cpufreq_governor_sem); | ||
| 1118 | return -EBUSY; | ||
| 1119 | } | ||
| 1120 | } | ||
| 1121 | list_add(&governor->governor_list, &cpufreq_governor_list); | ||
| 1122 | |||
| 1123 | up(&cpufreq_governor_sem); | ||
| 1124 | |||
| 1125 | return 0; | ||
| 1126 | } | ||
| 1127 | EXPORT_SYMBOL_GPL(cpufreq_register_governor); | ||
| 1128 | |||
| 1129 | |||
| 1130 | void cpufreq_unregister_governor(struct cpufreq_governor *governor) | ||
| 1131 | { | ||
| 1132 | if (!governor) | ||
| 1133 | return; | ||
| 1134 | |||
| 1135 | down(&cpufreq_governor_sem); | ||
| 1136 | list_del(&governor->governor_list); | ||
| 1137 | up(&cpufreq_governor_sem); | ||
| 1138 | return; | ||
| 1139 | } | ||
| 1140 | EXPORT_SYMBOL_GPL(cpufreq_unregister_governor); | ||
| 1141 | |||
| 1142 | |||
| 1143 | |||
| 1144 | /********************************************************************* | ||
| 1145 | * POLICY INTERFACE * | ||
| 1146 | *********************************************************************/ | ||
| 1147 | |||
| 1148 | /** | ||
| 1149 | * cpufreq_get_policy - get the current cpufreq_policy | ||
| 1150 | * @policy: struct cpufreq_policy into which the current cpufreq_policy is written | ||
| 1151 | * | ||
| 1152 | * Reads the current cpufreq policy. | ||
| 1153 | */ | ||
| 1154 | int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu) | ||
| 1155 | { | ||
| 1156 | struct cpufreq_policy *cpu_policy; | ||
| 1157 | if (!policy) | ||
| 1158 | return -EINVAL; | ||
| 1159 | |||
| 1160 | cpu_policy = cpufreq_cpu_get(cpu); | ||
| 1161 | if (!cpu_policy) | ||
| 1162 | return -EINVAL; | ||
| 1163 | |||
| 1164 | down(&cpu_policy->lock); | ||
| 1165 | memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy)); | ||
| 1166 | up(&cpu_policy->lock); | ||
| 1167 | |||
| 1168 | cpufreq_cpu_put(cpu_policy); | ||
| 1169 | |||
| 1170 | return 0; | ||
| 1171 | } | ||
| 1172 | EXPORT_SYMBOL(cpufreq_get_policy); | ||
| 1173 | |||
| 1174 | |||
/* Core of policy setting. Validates the requested limits against the
 * driver, lets policy notifiers adjust them, then either programs the
 * hardware directly (->setpolicy drivers) or switches/updates the
 * governor (->target drivers). Callers hold data->lock (see
 * cpufreq_set_policy() and cpufreq_update_policy()). */
static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy)
{
	int ret = 0;

	cpufreq_debug_disable_ratelimit();
	dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
		policy->min, policy->max);

	/* the request carries no hardware info; copy it from the live policy */
	memcpy(&policy->cpuinfo,
	       &data->cpuinfo,
	       sizeof(struct cpufreq_cpuinfo));

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	down_read(&cpufreq_notifier_rwsem);

	/* adjust if necessary - all reasons */
	notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_ADJUST,
			    policy);

	/* adjust if necessary - hardware incompatibility*/
	notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_INCOMPATIBLE,
			    policy);

	/* verify the cpu speed can be set within this limit,
	   which might be different to the first one */
	ret = cpufreq_driver->verify(policy);
	if (ret) {
		up_read(&cpufreq_notifier_rwsem);
		goto error_out;
	}

	/* notification of the new policy */
	notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_NOTIFY,
			    policy);

	up_read(&cpufreq_notifier_rwsem);

	data->min = policy->min;
	data->max = policy->max;

	dprintk("new min and max freqs are %u - %u kHz\n", data->min, data->max);

	if (cpufreq_driver->setpolicy) {
		/* driver handles min/max/policy itself */
		data->policy = policy->policy;
		dprintk("setting range\n");
		ret = cpufreq_driver->setpolicy(policy);
	} else {
		if (policy->governor != data->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = data->governor;

			dprintk("governor switch\n");

			/* end old governor */
			if (data->governor)
				__cpufreq_governor(data, CPUFREQ_GOV_STOP);

			/* start new governor */
			data->governor = policy->governor;
			if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
				/* new governor failed, so re-start old one */
				dprintk("starting governor %s failed\n", data->governor->name);
				if (old_gov) {
					data->governor = old_gov;
					__cpufreq_governor(data, CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		dprintk("governor: change or update limits\n");
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

error_out:
	cpufreq_debug_enable_ratelimit();
	return ret;
}
| 1258 | |||
| 1259 | /** | ||
| 1260 | * cpufreq_set_policy - set a new CPUFreq policy | ||
| 1261 | * @policy: policy to be set. | ||
| 1262 | * | ||
| 1263 | * Sets a new CPU frequency and voltage scaling policy. | ||
| 1264 | */ | ||
| 1265 | int cpufreq_set_policy(struct cpufreq_policy *policy) | ||
| 1266 | { | ||
| 1267 | int ret = 0; | ||
| 1268 | struct cpufreq_policy *data; | ||
| 1269 | |||
| 1270 | if (!policy) | ||
| 1271 | return -EINVAL; | ||
| 1272 | |||
| 1273 | data = cpufreq_cpu_get(policy->cpu); | ||
| 1274 | if (!data) | ||
| 1275 | return -EINVAL; | ||
| 1276 | |||
| 1277 | /* lock this CPU */ | ||
| 1278 | down(&data->lock); | ||
| 1279 | |||
| 1280 | ret = __cpufreq_set_policy(data, policy); | ||
| 1281 | data->user_policy.min = data->min; | ||
| 1282 | data->user_policy.max = data->max; | ||
| 1283 | data->user_policy.policy = data->policy; | ||
| 1284 | data->user_policy.governor = data->governor; | ||
| 1285 | |||
| 1286 | up(&data->lock); | ||
| 1287 | cpufreq_cpu_put(data); | ||
| 1288 | |||
| 1289 | return ret; | ||
| 1290 | } | ||
| 1291 | EXPORT_SYMBOL(cpufreq_set_policy); | ||
| 1292 | |||
| 1293 | |||
| 1294 | /** | ||
| 1295 | * cpufreq_update_policy - re-evaluate an existing cpufreq policy | ||
| 1296 | * @cpu: CPU which shall be re-evaluated | ||
| 1297 | * | ||
| 1298 | * Usefull for policy notifiers which have different necessities | ||
| 1299 | * at different times. | ||
| 1300 | */ | ||
| 1301 | int cpufreq_update_policy(unsigned int cpu) | ||
| 1302 | { | ||
| 1303 | struct cpufreq_policy *data = cpufreq_cpu_get(cpu); | ||
| 1304 | struct cpufreq_policy policy; | ||
| 1305 | int ret = 0; | ||
| 1306 | |||
| 1307 | if (!data) | ||
| 1308 | return -ENODEV; | ||
| 1309 | |||
| 1310 | down(&data->lock); | ||
| 1311 | |||
| 1312 | dprintk("updating policy for CPU %u\n", cpu); | ||
| 1313 | memcpy(&policy, | ||
| 1314 | data, | ||
| 1315 | sizeof(struct cpufreq_policy)); | ||
| 1316 | policy.min = data->user_policy.min; | ||
| 1317 | policy.max = data->user_policy.max; | ||
| 1318 | policy.policy = data->user_policy.policy; | ||
| 1319 | policy.governor = data->user_policy.governor; | ||
| 1320 | |||
| 1321 | ret = __cpufreq_set_policy(data, &policy); | ||
| 1322 | |||
| 1323 | up(&data->lock); | ||
| 1324 | |||
| 1325 | cpufreq_cpu_put(data); | ||
| 1326 | return ret; | ||
| 1327 | } | ||
| 1328 | EXPORT_SYMBOL(cpufreq_update_policy); | ||
| 1329 | |||
| 1330 | |||
| 1331 | /********************************************************************* | ||
| 1332 | * REGISTER / UNREGISTER CPUFREQ DRIVER * | ||
| 1333 | *********************************************************************/ | ||
| 1334 | |||
| 1335 | /** | ||
| 1336 | * cpufreq_register_driver - register a CPU Frequency driver | ||
| 1337 | * @driver_data: A struct cpufreq_driver containing the values# | ||
| 1338 | * submitted by the CPU Frequency driver. | ||
| 1339 | * | ||
| 1340 | * Registers a CPU Frequency driver to this core code. This code | ||
| 1341 | * returns zero on success, -EBUSY when another driver got here first | ||
| 1342 | * (and isn't unregistered in the meantime). | ||
| 1343 | * | ||
| 1344 | */ | ||
| 1345 | int cpufreq_register_driver(struct cpufreq_driver *driver_data) | ||
| 1346 | { | ||
| 1347 | unsigned long flags; | ||
| 1348 | int ret; | ||
| 1349 | |||
| 1350 | if (!driver_data || !driver_data->verify || !driver_data->init || | ||
| 1351 | ((!driver_data->setpolicy) && (!driver_data->target))) | ||
| 1352 | return -EINVAL; | ||
| 1353 | |||
| 1354 | dprintk("trying to register driver %s\n", driver_data->name); | ||
| 1355 | |||
| 1356 | if (driver_data->setpolicy) | ||
| 1357 | driver_data->flags |= CPUFREQ_CONST_LOOPS; | ||
| 1358 | |||
| 1359 | spin_lock_irqsave(&cpufreq_driver_lock, flags); | ||
| 1360 | if (cpufreq_driver) { | ||
| 1361 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | ||
| 1362 | return -EBUSY; | ||
| 1363 | } | ||
| 1364 | cpufreq_driver = driver_data; | ||
| 1365 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | ||
| 1366 | |||
| 1367 | ret = sysdev_driver_register(&cpu_sysdev_class,&cpufreq_sysdev_driver); | ||
| 1368 | |||
| 1369 | if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) { | ||
| 1370 | int i; | ||
| 1371 | ret = -ENODEV; | ||
| 1372 | |||
| 1373 | /* check for at least one working CPU */ | ||
| 1374 | for (i=0; i<NR_CPUS; i++) | ||
| 1375 | if (cpufreq_cpu_data[i]) | ||
| 1376 | ret = 0; | ||
| 1377 | |||
| 1378 | /* if all ->init() calls failed, unregister */ | ||
| 1379 | if (ret) { | ||
| 1380 | dprintk("no CPU initialized for driver %s\n", driver_data->name); | ||
| 1381 | sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver); | ||
| 1382 | |||
| 1383 | spin_lock_irqsave(&cpufreq_driver_lock, flags); | ||
| 1384 | cpufreq_driver = NULL; | ||
| 1385 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | ||
| 1386 | } | ||
| 1387 | } | ||
| 1388 | |||
| 1389 | if (!ret) { | ||
| 1390 | dprintk("driver %s up and running\n", driver_data->name); | ||
| 1391 | cpufreq_debug_enable_ratelimit(); | ||
| 1392 | } | ||
| 1393 | |||
| 1394 | return (ret); | ||
| 1395 | } | ||
| 1396 | EXPORT_SYMBOL_GPL(cpufreq_register_driver); | ||
| 1397 | |||
| 1398 | |||
| 1399 | /** | ||
| 1400 | * cpufreq_unregister_driver - unregister the current CPUFreq driver | ||
| 1401 | * | ||
| 1402 | * Unregister the current CPUFreq driver. Only call this if you have | ||
| 1403 | * the right to do so, i.e. if you have succeeded in initialising before! | ||
| 1404 | * Returns zero if successful, and -EINVAL if the cpufreq_driver is | ||
| 1405 | * currently not initialised. | ||
| 1406 | */ | ||
| 1407 | int cpufreq_unregister_driver(struct cpufreq_driver *driver) | ||
| 1408 | { | ||
| 1409 | unsigned long flags; | ||
| 1410 | |||
| 1411 | cpufreq_debug_disable_ratelimit(); | ||
| 1412 | |||
| 1413 | if (!cpufreq_driver || (driver != cpufreq_driver)) { | ||
| 1414 | cpufreq_debug_enable_ratelimit(); | ||
| 1415 | return -EINVAL; | ||
| 1416 | } | ||
| 1417 | |||
| 1418 | dprintk("unregistering driver %s\n", driver->name); | ||
| 1419 | |||
| 1420 | sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver); | ||
| 1421 | |||
| 1422 | spin_lock_irqsave(&cpufreq_driver_lock, flags); | ||
| 1423 | cpufreq_driver = NULL; | ||
| 1424 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | ||
| 1425 | |||
| 1426 | return 0; | ||
| 1427 | } | ||
| 1428 | EXPORT_SYMBOL_GPL(cpufreq_unregister_driver); | ||
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c new file mode 100644 index 000000000000..8d83a21c6477 --- /dev/null +++ b/drivers/cpufreq/cpufreq_ondemand.c | |||
| @@ -0,0 +1,491 @@ | |||
| 1 | /* | ||
| 2 | * drivers/cpufreq/cpufreq_ondemand.c | ||
| 3 | * | ||
| 4 | * Copyright (C) 2001 Russell King | ||
| 5 | * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>. | ||
| 6 | * Jun Nakajima <jun.nakajima@intel.com> | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License version 2 as | ||
| 10 | * published by the Free Software Foundation. | ||
| 11 | */ | ||
| 12 | |||
| 13 | #include <linux/kernel.h> | ||
| 14 | #include <linux/module.h> | ||
| 15 | #include <linux/smp.h> | ||
| 16 | #include <linux/init.h> | ||
| 17 | #include <linux/interrupt.h> | ||
| 18 | #include <linux/ctype.h> | ||
| 19 | #include <linux/cpufreq.h> | ||
| 20 | #include <linux/sysctl.h> | ||
| 21 | #include <linux/types.h> | ||
| 22 | #include <linux/fs.h> | ||
| 23 | #include <linux/sysfs.h> | ||
| 24 | #include <linux/sched.h> | ||
| 25 | #include <linux/kmod.h> | ||
| 26 | #include <linux/workqueue.h> | ||
| 27 | #include <linux/jiffies.h> | ||
| 28 | #include <linux/kernel_stat.h> | ||
| 29 | #include <linux/percpu.h> | ||
| 30 | |||
| 31 | /* | ||
| 32 | * dbs is used in this file as a shortform for demandbased switching | ||
| 33 | * It helps to keep variable names smaller, simpler | ||
| 34 | */ | ||
| 35 | |||
/* Busy-percentage bounds for scaling up/down (see dbs_tuners below). */
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define MIN_FREQUENCY_UP_THRESHOLD		(0)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)

#define DEF_FREQUENCY_DOWN_THRESHOLD		(20)
#define MIN_FREQUENCY_DOWN_THRESHOLD		(0)
#define MAX_FREQUENCY_DOWN_THRESHOLD		(100)

/*
 * The polling frequency of this governor depends on the capability of
 * the processor. Default polling frequency is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * transition latency <= 10mS, using appropriate sampling
 * rate.
 * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
 * this governor will not work.
 * All times here are in uS.
 */
static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE			(def_sampling_rate / 2)
#define MAX_SAMPLING_RATE			(500 * def_sampling_rate)
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER	(1000)
#define DEF_SAMPLING_DOWN_FACTOR		(10)
#define TRANSITION_LATENCY_LIMIT		(10 * 1000)
/* uS -> jiffies, clamped to at least 1 jiffy. The argument is now
 * parenthesized so that passing an expression (e.g. a + b) expands
 * correctly -- the previous form used bare "x * HZ". */
#define sampling_rate_in_HZ(x) ((((x) * HZ) < (1000 * 1000))?1:(((x) * HZ) / (1000 * 1000)))
| 61 | |||
| 62 | static void do_dbs_timer(void *data); | ||
| 63 | |||
| 64 | struct cpu_dbs_info_s { | ||
| 65 | struct cpufreq_policy *cur_policy; | ||
| 66 | unsigned int prev_cpu_idle_up; | ||
| 67 | unsigned int prev_cpu_idle_down; | ||
| 68 | unsigned int enable; | ||
| 69 | }; | ||
| 70 | static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info); | ||
| 71 | |||
| 72 | static unsigned int dbs_enable; /* number of CPUs using this policy */ | ||
| 73 | |||
| 74 | static DECLARE_MUTEX (dbs_sem); | ||
| 75 | static DECLARE_WORK (dbs_work, do_dbs_timer, NULL); | ||
| 76 | |||
| 77 | struct dbs_tuners { | ||
| 78 | unsigned int sampling_rate; | ||
| 79 | unsigned int sampling_down_factor; | ||
| 80 | unsigned int up_threshold; | ||
| 81 | unsigned int down_threshold; | ||
| 82 | }; | ||
| 83 | |||
| 84 | static struct dbs_tuners dbs_tuners_ins = { | ||
| 85 | .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, | ||
| 86 | .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD, | ||
| 87 | .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, | ||
| 88 | }; | ||
| 89 | |||
| 90 | /************************** sysfs interface ************************/ | ||
| 91 | static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf) | ||
| 92 | { | ||
| 93 | return sprintf (buf, "%u\n", MAX_SAMPLING_RATE); | ||
| 94 | } | ||
| 95 | |||
| 96 | static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf) | ||
| 97 | { | ||
| 98 | return sprintf (buf, "%u\n", MIN_SAMPLING_RATE); | ||
| 99 | } | ||
| 100 | |||
/* Generate a read-only sysfs attribute backed by show_##_name(). */
#define define_one_ro(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(sampling_rate_max);
define_one_ro(sampling_rate_min);

/* cpufreq_ondemand Governor Tunables */
/* Generate a show_##file_name() printing one field of dbs_tuners_ins. */
#define show_one(file_name, object)					\
static ssize_t show_##file_name						\
(struct cpufreq_policy *unused, char *buf)				\
{									\
	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
}
show_one(sampling_rate, sampling_rate);
show_one(sampling_down_factor, sampling_down_factor);
show_one(up_threshold, up_threshold);
show_one(down_threshold, down_threshold);
| 119 | |||
| 120 | static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused, | ||
| 121 | const char *buf, size_t count) | ||
| 122 | { | ||
| 123 | unsigned int input; | ||
| 124 | int ret; | ||
| 125 | ret = sscanf (buf, "%u", &input); | ||
| 126 | if (ret != 1 ) | ||
| 127 | return -EINVAL; | ||
| 128 | |||
| 129 | down(&dbs_sem); | ||
| 130 | dbs_tuners_ins.sampling_down_factor = input; | ||
| 131 | up(&dbs_sem); | ||
| 132 | |||
| 133 | return count; | ||
| 134 | } | ||
| 135 | |||
| 136 | static ssize_t store_sampling_rate(struct cpufreq_policy *unused, | ||
| 137 | const char *buf, size_t count) | ||
| 138 | { | ||
| 139 | unsigned int input; | ||
| 140 | int ret; | ||
| 141 | ret = sscanf (buf, "%u", &input); | ||
| 142 | |||
| 143 | down(&dbs_sem); | ||
| 144 | if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) { | ||
| 145 | up(&dbs_sem); | ||
| 146 | return -EINVAL; | ||
| 147 | } | ||
| 148 | |||
| 149 | dbs_tuners_ins.sampling_rate = input; | ||
| 150 | up(&dbs_sem); | ||
| 151 | |||
| 152 | return count; | ||
| 153 | } | ||
| 154 | |||
| 155 | static ssize_t store_up_threshold(struct cpufreq_policy *unused, | ||
| 156 | const char *buf, size_t count) | ||
| 157 | { | ||
| 158 | unsigned int input; | ||
| 159 | int ret; | ||
| 160 | ret = sscanf (buf, "%u", &input); | ||
| 161 | |||
| 162 | down(&dbs_sem); | ||
| 163 | if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || | ||
| 164 | input < MIN_FREQUENCY_UP_THRESHOLD || | ||
| 165 | input <= dbs_tuners_ins.down_threshold) { | ||
| 166 | up(&dbs_sem); | ||
| 167 | return -EINVAL; | ||
| 168 | } | ||
| 169 | |||
| 170 | dbs_tuners_ins.up_threshold = input; | ||
| 171 | up(&dbs_sem); | ||
| 172 | |||
| 173 | return count; | ||
| 174 | } | ||
| 175 | |||
| 176 | static ssize_t store_down_threshold(struct cpufreq_policy *unused, | ||
| 177 | const char *buf, size_t count) | ||
| 178 | { | ||
| 179 | unsigned int input; | ||
| 180 | int ret; | ||
| 181 | ret = sscanf (buf, "%u", &input); | ||
| 182 | |||
| 183 | down(&dbs_sem); | ||
| 184 | if (ret != 1 || input > MAX_FREQUENCY_DOWN_THRESHOLD || | ||
| 185 | input < MIN_FREQUENCY_DOWN_THRESHOLD || | ||
| 186 | input >= dbs_tuners_ins.up_threshold) { | ||
| 187 | up(&dbs_sem); | ||
| 188 | return -EINVAL; | ||
| 189 | } | ||
| 190 | |||
| 191 | dbs_tuners_ins.down_threshold = input; | ||
| 192 | up(&dbs_sem); | ||
| 193 | |||
| 194 | return count; | ||
| 195 | } | ||
| 196 | |||
/* Generate a read-write sysfs attribute wired to show_/store_##_name(). */
#define define_one_rw(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_rw(sampling_rate);
define_one_rw(sampling_down_factor);
define_one_rw(up_threshold);
define_one_rw(down_threshold);

/* All tunables appear in an "ondemand" subdirectory of the policy kobject. */
static struct attribute * dbs_attributes[] = {
	&sampling_rate_max.attr,
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&sampling_down_factor.attr,
	&up_threshold.attr,
	&down_threshold.attr,
	NULL
};

static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "ondemand",
};

/************************** sysfs end ************************/
| 222 | |||
/*
 * Core sampling routine, run for each online CPU from do_dbs_timer()
 * (under dbs_sem).
 *
 * Every sample it measures the idle ticks accumulated since the last
 * sample across all CPUs of the policy and jumps straight to
 * policy->max when they were busy enough.  Only every
 * sampling_down_factor-th sample does it evaluate the downward check
 * and step the frequency down by 5% of policy->max.
 */
static void dbs_check_cpu(int cpu)
{
	unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
	unsigned int total_idle_ticks;
	unsigned int freq_down_step;
	unsigned int freq_down_sampling_rate;
	/* Samples skipped since the last down evaluation, per CPU. */
	static int down_skip[NR_CPUS];
	struct cpu_dbs_info_s *this_dbs_info;

	struct cpufreq_policy *policy;
	unsigned int j;

	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
	if (!this_dbs_info->enable)
		return;

	policy = this_dbs_info->cur_policy;
	/*
	 * The default safe range is 20% to 80%
	 * Every sampling_rate, we check
	 *     - If current idle time is less than 20%, then we try to
	 *       increase frequency
	 * Every sampling_rate*sampling_down_factor, we check
	 *     - If current idle time is more than 80%, then we try to
	 *       decrease frequency
	 *
	 * Any frequency increase takes it to the maximum frequency.
	 * Frequency reduction happens at minimum steps of
	 * 5% of max_frequency
	 */

	/* Check for frequency increase */
	total_idle_ticks = kstat_cpu(cpu).cpustat.idle +
		kstat_cpu(cpu).cpustat.iowait;
	/* Idle ticks on this CPU since the previous up check. */
	idle_ticks = total_idle_ticks -
		this_dbs_info->prev_cpu_idle_up;
	this_dbs_info->prev_cpu_idle_up = total_idle_ticks;

	/* Use the busiest (least idle) CPU of the policy as reference. */
	for_each_cpu_mask(j, policy->cpus) {
		unsigned int tmp_idle_ticks;
		struct cpu_dbs_info_s *j_dbs_info;

		if (j == cpu)
			continue;

		j_dbs_info = &per_cpu(cpu_dbs_info, j);
		/* Check for frequency increase */
		total_idle_ticks = kstat_cpu(j).cpustat.idle +
			kstat_cpu(j).cpustat.iowait;
		tmp_idle_ticks = total_idle_ticks -
			j_dbs_info->prev_cpu_idle_up;
		j_dbs_info->prev_cpu_idle_up = total_idle_ticks;

		if (tmp_idle_ticks < idle_ticks)
			idle_ticks = tmp_idle_ticks;
	}

	/* Scale idle ticks by 100 and compare with up and down ticks */
	idle_ticks *= 100;
	/* Max idle ticks per window allowed before we call the CPU busy. */
	up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
			sampling_rate_in_HZ(dbs_tuners_ins.sampling_rate);

	if (idle_ticks < up_idle_ticks) {
		/* Busy: go straight to the maximum frequency. */
		__cpufreq_driver_target(policy, policy->max,
			CPUFREQ_RELATION_H);
		down_skip[cpu] = 0;
		this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
		return;
	}

	/* Check for frequency decrease, only every N-th sample. */
	down_skip[cpu]++;
	if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
		return;

	total_idle_ticks = kstat_cpu(cpu).cpustat.idle +
		kstat_cpu(cpu).cpustat.iowait;
	idle_ticks = total_idle_ticks -
		this_dbs_info->prev_cpu_idle_down;
	this_dbs_info->prev_cpu_idle_down = total_idle_ticks;

	for_each_cpu_mask(j, policy->cpus) {
		unsigned int tmp_idle_ticks;
		struct cpu_dbs_info_s *j_dbs_info;

		if (j == cpu)
			continue;

		j_dbs_info = &per_cpu(cpu_dbs_info, j);
		/* Check for frequency decrease */
		total_idle_ticks = kstat_cpu(j).cpustat.idle +
			kstat_cpu(j).cpustat.iowait;
		tmp_idle_ticks = total_idle_ticks -
			j_dbs_info->prev_cpu_idle_down;
		j_dbs_info->prev_cpu_idle_down = total_idle_ticks;

		if (tmp_idle_ticks < idle_ticks)
			idle_ticks = tmp_idle_ticks;
	}

	/* Scale idle ticks by 100 and compare with up and down ticks */
	idle_ticks *= 100;
	down_skip[cpu] = 0;

	/* The down window spans sampling_down_factor sampling intervals. */
	freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
		dbs_tuners_ins.sampling_down_factor;
	down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
		sampling_rate_in_HZ(freq_down_sampling_rate);

	if (idle_ticks > down_idle_ticks ) {
		/* Mostly idle: step down by 5% of the maximum frequency. */
		freq_down_step = (5 * policy->max) / 100;

		/* max freq cannot be less than 100. But who knows.... */
		if (unlikely(freq_down_step == 0))
			freq_down_step = 5;

		__cpufreq_driver_target(policy,
			policy->cur - freq_down_step,
			CPUFREQ_RELATION_H);
		return;
	}
}
| 346 | |||
| 347 | static void do_dbs_timer(void *data) | ||
| 348 | { | ||
| 349 | int i; | ||
| 350 | down(&dbs_sem); | ||
| 351 | for (i = 0; i < NR_CPUS; i++) | ||
| 352 | if (cpu_online(i)) | ||
| 353 | dbs_check_cpu(i); | ||
| 354 | schedule_delayed_work(&dbs_work, | ||
| 355 | sampling_rate_in_HZ(dbs_tuners_ins.sampling_rate)); | ||
| 356 | up(&dbs_sem); | ||
| 357 | } | ||
| 358 | |||
| 359 | static inline void dbs_timer_init(void) | ||
| 360 | { | ||
| 361 | INIT_WORK(&dbs_work, do_dbs_timer, NULL); | ||
| 362 | schedule_delayed_work(&dbs_work, | ||
| 363 | sampling_rate_in_HZ(dbs_tuners_ins.sampling_rate)); | ||
| 364 | return; | ||
| 365 | } | ||
| 366 | |||
| 367 | static inline void dbs_timer_exit(void) | ||
| 368 | { | ||
| 369 | cancel_delayed_work(&dbs_work); | ||
| 370 | return; | ||
| 371 | } | ||
| 372 | |||
/*
 * Governor callback invoked by the cpufreq core.
 *
 * CPUFREQ_GOV_START:  hook all CPUs of the policy into the sampling
 *                     machinery; the first started policy also derives
 *                     def_sampling_rate and arms the sampling work.
 * CPUFREQ_GOV_STOP:   detach the policy; the last stopped policy
 *                     cancels the sampling work.
 * CPUFREQ_GOV_LIMITS: clamp the current frequency into the new
 *                     [policy->min, policy->max] range.
 */
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
		unsigned int event)
{
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info_s *this_dbs_info;
	unsigned int j;

	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		/* Cannot govern an offline CPU or an unknown current freq. */
		if ((!cpu_online(cpu)) ||
		    (!policy->cur))
			return -EINVAL;

		/* Switching must be fast enough for periodic sampling
		 * (transition_latency is in nS, the limit in uS). */
		if (policy->cpuinfo.transition_latency >
				(TRANSITION_LATENCY_LIMIT * 1000))
			return -EINVAL;
		if (this_dbs_info->enable) /* Already enabled */
			break;

		down(&dbs_sem);
		for_each_cpu_mask(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
			j_dbs_info = &per_cpu(cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;

			/* Baseline idle snapshots for both checks. */
			j_dbs_info->prev_cpu_idle_up =
				kstat_cpu(j).cpustat.idle +
				kstat_cpu(j).cpustat.iowait;
			j_dbs_info->prev_cpu_idle_down =
				kstat_cpu(j).cpustat.idle +
				kstat_cpu(j).cpustat.iowait;
		}
		this_dbs_info->enable = 1;
		/* NOTE(review): sysfs_create_group() result is ignored; on
		 * failure the tunables directory is silently missing. */
		sysfs_create_group(&policy->kobj, &dbs_attr_group);
		dbs_enable++;
		/*
		 * Start the timerschedule work, when this governor
		 * is used for first time
		 */
		if (dbs_enable == 1) {
			unsigned int latency;
			/* policy latency is in nS. Convert it to uS first */

			latency = policy->cpuinfo.transition_latency;
			if (latency < 1000)
				latency = 1000;

			def_sampling_rate = (latency / 1000) *
					DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
			dbs_tuners_ins.sampling_rate = def_sampling_rate;

			dbs_timer_init();
		}

		up(&dbs_sem);
		break;

	case CPUFREQ_GOV_STOP:
		down(&dbs_sem);
		this_dbs_info->enable = 0;
		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
		dbs_enable--;
		/*
		 * Stop the timerschedule work, when this governor
		 * is no longer used by any CPU
		 */
		if (dbs_enable == 0)
			dbs_timer_exit();

		up(&dbs_sem);

		break;

	case CPUFREQ_GOV_LIMITS:
		/* New limits: force the current frequency into range. */
		down(&dbs_sem);
		if (policy->max < this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(
					this_dbs_info->cur_policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(
					this_dbs_info->cur_policy,
					policy->min, CPUFREQ_RELATION_L);
		up(&dbs_sem);
		break;
	}
	return 0;
}
| 463 | |||
/* Governor descriptor registered with the cpufreq core ("ondemand"). */
struct cpufreq_governor cpufreq_gov_dbs = {
	.name = "ondemand",
	.governor = cpufreq_governor_dbs,
	.owner = THIS_MODULE,
};
EXPORT_SYMBOL(cpufreq_gov_dbs);
| 470 | |||
| 471 | static int __init cpufreq_gov_dbs_init(void) | ||
| 472 | { | ||
| 473 | return cpufreq_register_governor(&cpufreq_gov_dbs); | ||
| 474 | } | ||
| 475 | |||
| 476 | static void __exit cpufreq_gov_dbs_exit(void) | ||
| 477 | { | ||
| 478 | /* Make sure that the scheduled work is indeed not running */ | ||
| 479 | flush_scheduled_work(); | ||
| 480 | |||
| 481 | cpufreq_unregister_governor(&cpufreq_gov_dbs); | ||
| 482 | } | ||
| 483 | |||
| 484 | |||
| 485 | MODULE_AUTHOR ("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>"); | ||
| 486 | MODULE_DESCRIPTION ("'cpufreq_ondemand' - A dynamic cpufreq governor for " | ||
| 487 | "Low Latency Frequency Transition capable processors"); | ||
| 488 | MODULE_LICENSE ("GPL"); | ||
| 489 | |||
| 490 | module_init(cpufreq_gov_dbs_init); | ||
| 491 | module_exit(cpufreq_gov_dbs_exit); | ||
diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c new file mode 100644 index 000000000000..8d536b40deb8 --- /dev/null +++ b/drivers/cpufreq/cpufreq_performance.c | |||
| @@ -0,0 +1,61 @@ | |||
| 1 | /* | ||
| 2 | * linux/drivers/cpufreq/cpufreq_performance.c | ||
| 3 | * | ||
| 4 | * Copyright (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> | ||
| 5 | * | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License version 2 as | ||
| 9 | * published by the Free Software Foundation. | ||
| 10 | * | ||
| 11 | */ | ||
| 12 | |||
| 13 | #include <linux/kernel.h> | ||
| 14 | #include <linux/module.h> | ||
| 15 | #include <linux/cpufreq.h> | ||
| 16 | #include <linux/init.h> | ||
| 17 | |||
| 18 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "performance", msg) | ||
| 19 | |||
| 20 | |||
| 21 | static int cpufreq_governor_performance(struct cpufreq_policy *policy, | ||
| 22 | unsigned int event) | ||
| 23 | { | ||
| 24 | switch (event) { | ||
| 25 | case CPUFREQ_GOV_START: | ||
| 26 | case CPUFREQ_GOV_LIMITS: | ||
| 27 | dprintk("setting to %u kHz because of event %u\n", policy->max, event); | ||
| 28 | __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H); | ||
| 29 | break; | ||
| 30 | default: | ||
| 31 | break; | ||
| 32 | } | ||
| 33 | return 0; | ||
| 34 | } | ||
| 35 | |||
/* Governor descriptor; non-static and exported, so other kernel code can
 * reference it directly (NOTE(review): presumably as a fallback/default
 * governor — confirm against the cpufreq core). */
struct cpufreq_governor cpufreq_gov_performance = {
	.name		= "performance",
	.governor	= cpufreq_governor_performance,
	.owner		= THIS_MODULE,
};
EXPORT_SYMBOL(cpufreq_gov_performance);


/* Register the governor with the cpufreq core. */
static int __init cpufreq_gov_performance_init(void)
{
	return cpufreq_register_governor(&cpufreq_gov_performance);
}


static void __exit cpufreq_gov_performance_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_performance);
}


MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION("CPUfreq policy governor 'performance'");
MODULE_LICENSE("GPL");

/* fs_initcall rather than module_init: registers earlier during boot
 * (NOTE(review): presumably so the governor exists before cpufreq
 * drivers initialize — confirm). */
fs_initcall(cpufreq_gov_performance_init);
module_exit(cpufreq_gov_performance_exit);
diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c new file mode 100644 index 000000000000..c85edda7feb0 --- /dev/null +++ b/drivers/cpufreq/cpufreq_powersave.c | |||
| @@ -0,0 +1,59 @@ | |||
| 1 | /* | ||
| 2 | * linux/drivers/cpufreq/cpufreq_powersave.c | ||
| 3 | * | ||
| 4 | * Copyright (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> | ||
| 5 | * | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License version 2 as | ||
| 9 | * published by the Free Software Foundation. | ||
| 10 | * | ||
| 11 | */ | ||
| 12 | |||
| 13 | #include <linux/kernel.h> | ||
| 14 | #include <linux/module.h> | ||
| 15 | #include <linux/cpufreq.h> | ||
| 16 | #include <linux/init.h> | ||
| 17 | |||
| 18 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "powersave", msg) | ||
| 19 | |||
| 20 | static int cpufreq_governor_powersave(struct cpufreq_policy *policy, | ||
| 21 | unsigned int event) | ||
| 22 | { | ||
| 23 | switch (event) { | ||
| 24 | case CPUFREQ_GOV_START: | ||
| 25 | case CPUFREQ_GOV_LIMITS: | ||
| 26 | dprintk("setting to %u kHz because of event %u\n", policy->min, event); | ||
| 27 | __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L); | ||
| 28 | break; | ||
| 29 | default: | ||
| 30 | break; | ||
| 31 | } | ||
| 32 | return 0; | ||
| 33 | } | ||
| 34 | |||
/* Governor descriptor; file-local, selected by name through the core. */
static struct cpufreq_governor cpufreq_gov_powersave = {
	.name		= "powersave",
	.governor	= cpufreq_governor_powersave,
	.owner		= THIS_MODULE,
};


/* Register the governor with the cpufreq core. */
static int __init cpufreq_gov_powersave_init(void)
{
	return cpufreq_register_governor(&cpufreq_gov_powersave);
}


static void __exit cpufreq_gov_powersave_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_powersave);
}


MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION("CPUfreq policy governor 'powersave'");
MODULE_LICENSE("GPL");

module_init(cpufreq_gov_powersave_init);
module_exit(cpufreq_gov_powersave_exit);
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c new file mode 100644 index 000000000000..2084593937c6 --- /dev/null +++ b/drivers/cpufreq/cpufreq_stats.c | |||
| @@ -0,0 +1,334 @@ | |||
| 1 | /* | ||
| 2 | * drivers/cpufreq/cpufreq_stats.c | ||
| 3 | * | ||
| 4 | * Copyright (C) 2003-2004 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>. | ||
| 5 | * (C) 2004 Zou Nan hai <nanhai.zou@intel.com>. | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License version 2 as | ||
| 9 | * published by the Free Software Foundation. | ||
| 10 | */ | ||
| 11 | |||
| 12 | #include <linux/config.h> | ||
| 13 | #include <linux/kernel.h> | ||
| 14 | #include <linux/sysdev.h> | ||
| 15 | #include <linux/cpu.h> | ||
| 16 | #include <linux/sysfs.h> | ||
| 17 | #include <linux/cpufreq.h> | ||
| 18 | #include <linux/jiffies.h> | ||
| 19 | #include <linux/percpu.h> | ||
| 20 | #include <linux/kobject.h> | ||
| 21 | #include <linux/spinlock.h> | ||
| 22 | |||
/* Protects all cpufreq_stats bookkeeping; initialized in module init. */
static spinlock_t cpufreq_stats_lock;

/* Declare a read-only sysfs attribute _attr_##_name backed by _show(). */
#define CPUFREQ_STATDEVICE_ATTR(_name,_mode,_show) \
static struct freq_attr _attr_##_name = {\
	.attr = {.name = __stringify(_name), .owner = THIS_MODULE, \
		.mode = _mode, }, \
	.show = _show,\
};
| 31 | |||
/*
 * Jiffies elapsed from @old to @new.
 *
 * Plain unsigned subtraction handles counter wrap-around by itself:
 * (new - old) modulo 2^BITS_PER_LONG is the true elapsed tick count
 * even when jiffies wrapped between the two samples.  The previous
 * open-coded version returned (old - new) whenever old > new, i.e. a
 * negated delta after a wrap.
 */
static unsigned long
delta_time(unsigned long old, unsigned long new)
{
	return new - old;
}
| 37 | |||
/*
 * Per-CPU statistics record.  time_in_state, freq_table (and, with
 * CONFIG_CPU_FREQ_STAT_DETAILS, trans_table) all live in one shared
 * allocation owned by time_in_state.
 */
struct cpufreq_stats {
	unsigned int cpu;		/* CPU these stats belong to */
	unsigned int total_trans;	/* frequency transitions recorded */
	unsigned long long last_time;	/* jiffies at last accounting */
	unsigned int max_state;		/* entries allocated per table */
	unsigned int state_num;		/* distinct valid frequencies */
	unsigned int last_index;	/* table index of current frequency */
	unsigned long long *time_in_state;	/* jiffies accumulated per state */
	unsigned int *freq_table;	/* frequency value of each state */
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	unsigned int *trans_table;	/* counts, indexed old*max_state+new */
#endif
};

/* One stats record per possible CPU; NULL until created. */
static struct cpufreq_stats *cpufreq_stats_table[NR_CPUS];

/* NOTE(review): this type appears unused within this file. */
struct cpufreq_stats_attribute {
	struct attribute attr;
	ssize_t(*show) (struct cpufreq_stats *, char *);
};
| 58 | |||
/*
 * Credit the jiffies elapsed since last_time to the state the CPU is
 * currently in (last_index), then reset the timestamp.  Called before
 * reading time_in_state and on every frequency transition.  Always
 * returns 0.
 */
static int
cpufreq_stats_update (unsigned int cpu)
{
	struct cpufreq_stats *stat;
	spin_lock(&cpufreq_stats_lock);
	stat = cpufreq_stats_table[cpu];
	/* time_in_state may still be NULL while the table is being built */
	if (stat->time_in_state)
		stat->time_in_state[stat->last_index] +=
			delta_time(stat->last_time, jiffies);
	stat->last_time = jiffies;
	spin_unlock(&cpufreq_stats_lock);
	return 0;
}
| 72 | |||
| 73 | static ssize_t | ||
| 74 | show_total_trans(struct cpufreq_policy *policy, char *buf) | ||
| 75 | { | ||
| 76 | struct cpufreq_stats *stat = cpufreq_stats_table[policy->cpu]; | ||
| 77 | if(!stat) | ||
| 78 | return 0; | ||
| 79 | return sprintf(buf, "%d\n", | ||
| 80 | cpufreq_stats_table[stat->cpu]->total_trans); | ||
| 81 | } | ||
| 82 | |||
| 83 | static ssize_t | ||
| 84 | show_time_in_state(struct cpufreq_policy *policy, char *buf) | ||
| 85 | { | ||
| 86 | ssize_t len = 0; | ||
| 87 | int i; | ||
| 88 | struct cpufreq_stats *stat = cpufreq_stats_table[policy->cpu]; | ||
| 89 | if(!stat) | ||
| 90 | return 0; | ||
| 91 | cpufreq_stats_update(stat->cpu); | ||
| 92 | for (i = 0; i < stat->state_num; i++) { | ||
| 93 | len += sprintf(buf + len, "%u %llu\n", | ||
| 94 | stat->freq_table[i], stat->time_in_state[i]); | ||
| 95 | } | ||
| 96 | return len; | ||
| 97 | } | ||
| 98 | |||
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
/*
 * sysfs: print the transition matrix — one row per originating state,
 * one column per destination state, in freq_table order.  Output is
 * clamped to the PAGE_SIZE buffer sysfs provides.
 */
static ssize_t
show_trans_table(struct cpufreq_policy *policy, char *buf)
{
	ssize_t len = 0;
	int i, j;

	struct cpufreq_stats *stat = cpufreq_stats_table[policy->cpu];
	if(!stat)
		return 0;
	cpufreq_stats_update(stat->cpu);
	for (i = 0; i < stat->state_num; i++) {
		if (len >= PAGE_SIZE)
			break;
		len += snprintf(buf + len, PAGE_SIZE - len, "%9u:\t",
				stat->freq_table[i]);

		for (j = 0; j < stat->state_num; j++) {
			if (len >= PAGE_SIZE)
				break;
			/* rows are strided by max_state, not state_num */
			len += snprintf(buf + len, PAGE_SIZE - len, "%u\t",
					stat->trans_table[i*stat->max_state+j]);
		}
		len += snprintf(buf + len, PAGE_SIZE - len, "\n");
	}
	return len;
}
CPUFREQ_STATDEVICE_ATTR(trans_table,0444,show_trans_table);
#endif
| 128 | |||
CPUFREQ_STATDEVICE_ATTR(total_trans,0444,show_total_trans);
CPUFREQ_STATDEVICE_ATTR(time_in_state,0444,show_time_in_state);

/* Attributes exported in the per-policy "stats" sysfs subdirectory. */
static struct attribute *default_attrs[] = {
	&_attr_total_trans.attr,
	&_attr_time_in_state.attr,
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	&_attr_trans_table.attr,
#endif
	NULL
};
static struct attribute_group stats_attr_group = {
	.attrs = default_attrs,
	.name = "stats"
};
| 144 | |||
| 145 | static int | ||
| 146 | freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq) | ||
| 147 | { | ||
| 148 | int index; | ||
| 149 | for (index = 0; index < stat->max_state; index++) | ||
| 150 | if (stat->freq_table[index] == freq) | ||
| 151 | return index; | ||
| 152 | return -1; | ||
| 153 | } | ||
| 154 | |||
/*
 * Tear down the stats for @cpu: remove the sysfs group (only via the
 * policy that actually owns the kobject) and free the record.  Safe to
 * call when no table exists.
 */
static void
cpufreq_stats_free_table (unsigned int cpu)
{
	struct cpufreq_stats *stat = cpufreq_stats_table[cpu];
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	if (policy && policy->cpu == cpu)
		sysfs_remove_group(&policy->kobj, &stats_attr_group);
	if (stat) {
		/* freq_table/trans_table share this one allocation */
		kfree(stat->time_in_state);
		kfree(stat);
	}
	cpufreq_stats_table[cpu] = NULL;
	if (policy)
		cpufreq_cpu_put(policy);
}
| 170 | |||
| 171 | static int | ||
| 172 | cpufreq_stats_create_table (struct cpufreq_policy *policy, | ||
| 173 | struct cpufreq_frequency_table *table) | ||
| 174 | { | ||
| 175 | unsigned int i, j, count = 0, ret = 0; | ||
| 176 | struct cpufreq_stats *stat; | ||
| 177 | struct cpufreq_policy *data; | ||
| 178 | unsigned int alloc_size; | ||
| 179 | unsigned int cpu = policy->cpu; | ||
| 180 | if (cpufreq_stats_table[cpu]) | ||
| 181 | return -EBUSY; | ||
| 182 | if ((stat = kmalloc(sizeof(struct cpufreq_stats), GFP_KERNEL)) == NULL) | ||
| 183 | return -ENOMEM; | ||
| 184 | memset(stat, 0, sizeof (struct cpufreq_stats)); | ||
| 185 | |||
| 186 | data = cpufreq_cpu_get(cpu); | ||
| 187 | if ((ret = sysfs_create_group(&data->kobj, &stats_attr_group))) | ||
| 188 | goto error_out; | ||
| 189 | |||
| 190 | stat->cpu = cpu; | ||
| 191 | cpufreq_stats_table[cpu] = stat; | ||
| 192 | |||
| 193 | for (i=0; table[i].frequency != CPUFREQ_TABLE_END; i++) { | ||
| 194 | unsigned int freq = table[i].frequency; | ||
| 195 | if (freq == CPUFREQ_ENTRY_INVALID) | ||
| 196 | continue; | ||
| 197 | count++; | ||
| 198 | } | ||
| 199 | |||
| 200 | alloc_size = count * sizeof(int) + count * sizeof(long long); | ||
| 201 | |||
| 202 | #ifdef CONFIG_CPU_FREQ_STAT_DETAILS | ||
| 203 | alloc_size += count * count * sizeof(int); | ||
| 204 | #endif | ||
| 205 | stat->max_state = count; | ||
| 206 | stat->time_in_state = kmalloc(alloc_size, GFP_KERNEL); | ||
| 207 | if (!stat->time_in_state) { | ||
| 208 | ret = -ENOMEM; | ||
| 209 | goto error_out; | ||
| 210 | } | ||
| 211 | memset(stat->time_in_state, 0, alloc_size); | ||
| 212 | stat->freq_table = (unsigned int *)(stat->time_in_state + count); | ||
| 213 | |||
| 214 | #ifdef CONFIG_CPU_FREQ_STAT_DETAILS | ||
| 215 | stat->trans_table = stat->freq_table + count; | ||
| 216 | #endif | ||
| 217 | j = 0; | ||
| 218 | for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { | ||
| 219 | unsigned int freq = table[i].frequency; | ||
| 220 | if (freq == CPUFREQ_ENTRY_INVALID) | ||
| 221 | continue; | ||
| 222 | if (freq_table_get_index(stat, freq) == -1) | ||
| 223 | stat->freq_table[j++] = freq; | ||
| 224 | } | ||
| 225 | stat->state_num = j; | ||
| 226 | spin_lock(&cpufreq_stats_lock); | ||
| 227 | stat->last_time = jiffies; | ||
| 228 | stat->last_index = freq_table_get_index(stat, policy->cur); | ||
| 229 | spin_unlock(&cpufreq_stats_lock); | ||
| 230 | cpufreq_cpu_put(data); | ||
| 231 | return 0; | ||
| 232 | error_out: | ||
| 233 | cpufreq_cpu_put(data); | ||
| 234 | kfree(stat); | ||
| 235 | cpufreq_stats_table[cpu] = NULL; | ||
| 236 | return ret; | ||
| 237 | } | ||
| 238 | |||
| 239 | static int | ||
| 240 | cpufreq_stat_notifier_policy (struct notifier_block *nb, unsigned long val, | ||
| 241 | void *data) | ||
| 242 | { | ||
| 243 | int ret; | ||
| 244 | struct cpufreq_policy *policy = data; | ||
| 245 | struct cpufreq_frequency_table *table; | ||
| 246 | unsigned int cpu = policy->cpu; | ||
| 247 | if (val != CPUFREQ_NOTIFY) | ||
| 248 | return 0; | ||
| 249 | table = cpufreq_frequency_get_table(cpu); | ||
| 250 | if (!table) | ||
| 251 | return 0; | ||
| 252 | if ((ret = cpufreq_stats_create_table(policy, table))) | ||
| 253 | return ret; | ||
| 254 | return 0; | ||
| 255 | } | ||
| 256 | |||
| 257 | static int | ||
| 258 | cpufreq_stat_notifier_trans (struct notifier_block *nb, unsigned long val, | ||
| 259 | void *data) | ||
| 260 | { | ||
| 261 | struct cpufreq_freqs *freq = data; | ||
| 262 | struct cpufreq_stats *stat; | ||
| 263 | int old_index, new_index; | ||
| 264 | |||
| 265 | if (val != CPUFREQ_POSTCHANGE) | ||
| 266 | return 0; | ||
| 267 | |||
| 268 | stat = cpufreq_stats_table[freq->cpu]; | ||
| 269 | if (!stat) | ||
| 270 | return 0; | ||
| 271 | old_index = freq_table_get_index(stat, freq->old); | ||
| 272 | new_index = freq_table_get_index(stat, freq->new); | ||
| 273 | |||
| 274 | cpufreq_stats_update(freq->cpu); | ||
| 275 | if (old_index == new_index) | ||
| 276 | return 0; | ||
| 277 | |||
| 278 | spin_lock(&cpufreq_stats_lock); | ||
| 279 | stat->last_index = new_index; | ||
| 280 | #ifdef CONFIG_CPU_FREQ_STAT_DETAILS | ||
| 281 | stat->trans_table[old_index * stat->max_state + new_index]++; | ||
| 282 | #endif | ||
| 283 | stat->total_trans++; | ||
| 284 | spin_unlock(&cpufreq_stats_lock); | ||
| 285 | return 0; | ||
| 286 | } | ||
| 287 | |||
/* Rebuild stats whenever a policy change is committed (CPUFREQ_NOTIFY). */
static struct notifier_block notifier_policy_block = {
	.notifier_call = cpufreq_stat_notifier_policy
};

/* Account each completed frequency transition (CPUFREQ_POSTCHANGE). */
static struct notifier_block notifier_trans_block = {
	.notifier_call = cpufreq_stat_notifier_trans
};
| 295 | |||
/*
 * Module init: register both notifiers (undoing the first if the second
 * fails), then nudge every CPU so already-existing policies get their
 * stats tables built through the policy notifier.
 */
static int
__init cpufreq_stats_init(void)
{
	int ret;
	unsigned int cpu;
	spin_lock_init(&cpufreq_stats_lock);
	if ((ret = cpufreq_register_notifier(&notifier_policy_block,
				CPUFREQ_POLICY_NOTIFIER)))
		return ret;

	if ((ret = cpufreq_register_notifier(&notifier_trans_block,
				CPUFREQ_TRANSITION_NOTIFIER))) {
		cpufreq_unregister_notifier(&notifier_policy_block,
				CPUFREQ_POLICY_NOTIFIER);
		return ret;
	}

	/* Re-evaluate each CPU's policy to trigger table creation now. */
	for_each_cpu(cpu)
		cpufreq_update_policy(cpu);
	return 0;
}
/*
 * cpufreq_stats_exit - module teardown: unhook the notifiers first so
 * no callback can touch a table while it is being freed, then free
 * every per-CPU stats table.
 */
static void
__exit cpufreq_stats_exit(void)
{
	unsigned int cpu;
	cpufreq_unregister_notifier(&notifier_policy_block,
			CPUFREQ_POLICY_NOTIFIER);
	cpufreq_unregister_notifier(&notifier_trans_block,
			CPUFREQ_TRANSITION_NOTIFIER);
	for_each_cpu(cpu)
		cpufreq_stats_free_table(cpu);
}
| 328 | |||
/* Module metadata and entry points. */
MODULE_AUTHOR ("Zou Nan hai <nanhai.zou@intel.com>");
MODULE_DESCRIPTION ("'cpufreq_stats' - A driver to export cpufreq stats through sysfs filesystem");
MODULE_LICENSE ("GPL");

module_init(cpufreq_stats_init);
module_exit(cpufreq_stats_exit);
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c new file mode 100644 index 000000000000..d32bf3593cd3 --- /dev/null +++ b/drivers/cpufreq/cpufreq_userspace.c | |||
| @@ -0,0 +1,207 @@ | |||
| 1 | /* | ||
| 2 | * linux/drivers/cpufreq/cpufreq_userspace.c | ||
| 3 | * | ||
| 4 | * Copyright (C) 2001 Russell King | ||
| 5 | * (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de> | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License version 2 as | ||
| 9 | * published by the Free Software Foundation. | ||
| 10 | * | ||
| 11 | */ | ||
| 12 | |||
| 13 | #include <linux/config.h> | ||
| 14 | #include <linux/kernel.h> | ||
| 15 | #include <linux/module.h> | ||
| 16 | #include <linux/smp.h> | ||
| 17 | #include <linux/init.h> | ||
| 18 | #include <linux/spinlock.h> | ||
| 19 | #include <linux/interrupt.h> | ||
| 20 | #include <linux/cpufreq.h> | ||
| 21 | #include <linux/types.h> | ||
| 22 | #include <linux/fs.h> | ||
| 23 | #include <linux/sysfs.h> | ||
| 24 | |||
| 25 | #include <asm/uaccess.h> | ||
| 26 | |||
| 27 | |||
/**
 * Per-CPU state of the userspace governor.  All of it is written
 * under userspace_sem, except cpu_cur_freq which is also updated
 * from the transition notifier below.
 */
static unsigned int cpu_max_freq[NR_CPUS];	/* upper policy limit, kHz */
static unsigned int cpu_min_freq[NR_CPUS];	/* lower policy limit, kHz */
static unsigned int cpu_cur_freq[NR_CPUS]; /* current CPU freq */
static unsigned int cpu_set_freq[NR_CPUS]; /* CPU freq desired by userspace */
static unsigned int cpu_is_managed[NR_CPUS];	/* 1 while this governor runs the CPU */
static struct cpufreq_policy current_policy[NR_CPUS];	/* snapshot for __cpufreq_driver_target() */

/* Serializes access to the per-CPU state above. */
static DECLARE_MUTEX (userspace_sem);

#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "userspace", msg)
| 41 | |||
| 42 | /* keep track of frequency transitions */ | ||
| 43 | static int | ||
| 44 | userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val, | ||
| 45 | void *data) | ||
| 46 | { | ||
| 47 | struct cpufreq_freqs *freq = data; | ||
| 48 | |||
| 49 | dprintk("saving cpu_cur_freq of cpu %u to be %u kHz\n", freq->cpu, freq->new); | ||
| 50 | cpu_cur_freq[freq->cpu] = freq->new; | ||
| 51 | |||
| 52 | return 0; | ||
| 53 | } | ||
| 54 | |||
/* Registered for CPUFREQ_TRANSITION_NOTIFIER events in the init below. */
static struct notifier_block userspace_cpufreq_notifier_block = {
	.notifier_call = userspace_cpufreq_notifier
};
| 58 | |||
| 59 | |||
| 60 | /** | ||
| 61 | * cpufreq_set - set the CPU frequency | ||
| 62 | * @freq: target frequency in kHz | ||
| 63 | * @cpu: CPU for which the frequency is to be set | ||
| 64 | * | ||
| 65 | * Sets the CPU frequency to freq. | ||
| 66 | */ | ||
| 67 | static int cpufreq_set(unsigned int freq, unsigned int cpu) | ||
| 68 | { | ||
| 69 | int ret = -EINVAL; | ||
| 70 | |||
| 71 | dprintk("cpufreq_set for cpu %u, freq %u kHz\n", cpu, freq); | ||
| 72 | |||
| 73 | down(&userspace_sem); | ||
| 74 | if (!cpu_is_managed[cpu]) | ||
| 75 | goto err; | ||
| 76 | |||
| 77 | cpu_set_freq[cpu] = freq; | ||
| 78 | |||
| 79 | if (freq < cpu_min_freq[cpu]) | ||
| 80 | freq = cpu_min_freq[cpu]; | ||
| 81 | if (freq > cpu_max_freq[cpu]) | ||
| 82 | freq = cpu_max_freq[cpu]; | ||
| 83 | |||
| 84 | /* | ||
| 85 | * We're safe from concurrent calls to ->target() here | ||
| 86 | * as we hold the userspace_sem lock. If we were calling | ||
| 87 | * cpufreq_driver_target, a deadlock situation might occur: | ||
| 88 | * A: cpufreq_set (lock userspace_sem) -> cpufreq_driver_target(lock policy->lock) | ||
| 89 | * B: cpufreq_set_policy(lock policy->lock) -> __cpufreq_governor -> cpufreq_governor_userspace (lock userspace_sem) | ||
| 90 | */ | ||
| 91 | ret = __cpufreq_driver_target(¤t_policy[cpu], freq, | ||
| 92 | CPUFREQ_RELATION_L); | ||
| 93 | |||
| 94 | err: | ||
| 95 | up(&userspace_sem); | ||
| 96 | return ret; | ||
| 97 | } | ||
| 98 | |||
| 99 | |||
| 100 | /************************** sysfs interface ************************/ | ||
| 101 | static ssize_t show_speed (struct cpufreq_policy *policy, char *buf) | ||
| 102 | { | ||
| 103 | return sprintf (buf, "%u\n", cpu_cur_freq[policy->cpu]); | ||
| 104 | } | ||
| 105 | |||
| 106 | static ssize_t | ||
| 107 | store_speed (struct cpufreq_policy *policy, const char *buf, size_t count) | ||
| 108 | { | ||
| 109 | unsigned int freq = 0; | ||
| 110 | unsigned int ret; | ||
| 111 | |||
| 112 | ret = sscanf (buf, "%u", &freq); | ||
| 113 | if (ret != 1) | ||
| 114 | return -EINVAL; | ||
| 115 | |||
| 116 | cpufreq_set(freq, policy->cpu); | ||
| 117 | |||
| 118 | return count; | ||
| 119 | } | ||
| 120 | |||
/* sysfs attribute "scaling_setspeed" (rw 0644), created per policy on
 * CPUFREQ_GOV_START and removed on CPUFREQ_GOV_STOP. */
static struct freq_attr freq_attr_scaling_setspeed =
{
	.attr = { .name = "scaling_setspeed", .mode = 0644, .owner = THIS_MODULE },
	.show = show_speed,
	.store = store_speed,
};
| 127 | |||
/*
 * cpufreq_governor_userspace - governor event handler.
 *
 * START:  begin managing policy->cpu — snapshot its limits and current
 *         frequency, expose the "scaling_setspeed" sysfs file.
 * STOP:   stop managing the CPU and remove the sysfs file.
 * LIMITS: refresh min/max and re-target so the last userspace request
 *         is honored as closely as the new limits allow.
 */
static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
				   unsigned int event)
{
	unsigned int cpu = policy->cpu;
	switch (event) {
	case CPUFREQ_GOV_START:
		if (!cpu_online(cpu))
			return -EINVAL;
		/* The driver must have filled in the current frequency. */
		BUG_ON(!policy->cur);
		down(&userspace_sem);
		cpu_is_managed[cpu] = 1;
		cpu_min_freq[cpu] = policy->min;
		cpu_max_freq[cpu] = policy->max;
		cpu_cur_freq[cpu] = policy->cur;
		/* Until userspace asks otherwise, "desired" == current. */
		cpu_set_freq[cpu] = policy->cur;
		/* NOTE(review): sysfs_create_file() return value is ignored. */
		sysfs_create_file (&policy->kobj, &freq_attr_scaling_setspeed.attr);
		/* Private copy used by later __cpufreq_driver_target() calls. */
		memcpy (&current_policy[cpu], policy, sizeof(struct cpufreq_policy));
		dprintk("managing cpu %u started (%u - %u kHz, currently %u kHz)\n", cpu, cpu_min_freq[cpu], cpu_max_freq[cpu], cpu_cur_freq[cpu]);
		up(&userspace_sem);
		break;
	case CPUFREQ_GOV_STOP:
		down(&userspace_sem);
		cpu_is_managed[cpu] = 0;
		cpu_min_freq[cpu] = 0;
		cpu_max_freq[cpu] = 0;
		cpu_set_freq[cpu] = 0;
		sysfs_remove_file (&policy->kobj, &freq_attr_scaling_setspeed.attr);
		dprintk("managing cpu %u stopped\n", cpu);
		up(&userspace_sem);
		break;
	case CPUFREQ_GOV_LIMITS:
		down(&userspace_sem);
		cpu_min_freq[cpu] = policy->min;
		cpu_max_freq[cpu] = policy->max;
		dprintk("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n", cpu, cpu_min_freq[cpu], cpu_max_freq[cpu], cpu_cur_freq[cpu], cpu_set_freq[cpu]);
		/* Re-apply the userspace request, clamped to the new window. */
		if (policy->max < cpu_set_freq[cpu]) {
			__cpufreq_driver_target(&current_policy[cpu], policy->max,
						CPUFREQ_RELATION_H);
		} else if (policy->min > cpu_set_freq[cpu]) {
			__cpufreq_driver_target(&current_policy[cpu], policy->min,
						CPUFREQ_RELATION_L);
		} else {
			__cpufreq_driver_target(&current_policy[cpu], cpu_set_freq[cpu],
						CPUFREQ_RELATION_L);
		}
		/* Refresh the snapshot after the new limits are in force. */
		memcpy (&current_policy[cpu], policy, sizeof(struct cpufreq_policy));
		up(&userspace_sem);
		break;
	}
	return 0;
}
| 179 | |||
| 180 | |||
/* Governor descriptor registered with the cpufreq core. */
struct cpufreq_governor cpufreq_gov_userspace = {
	.name		= "userspace",
	.governor	= cpufreq_governor_userspace,
	.owner		= THIS_MODULE,
};
EXPORT_SYMBOL(cpufreq_gov_userspace);
| 187 | |||
| 188 | static int __init cpufreq_gov_userspace_init(void) | ||
| 189 | { | ||
| 190 | cpufreq_register_notifier(&userspace_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); | ||
| 191 | return cpufreq_register_governor(&cpufreq_gov_userspace); | ||
| 192 | } | ||
| 193 | |||
| 194 | |||
/* Module teardown: reverse of init — drop the governor first, then
 * the transition notifier. */
static void __exit cpufreq_gov_userspace_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_userspace);
	cpufreq_unregister_notifier(&userspace_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
}
| 200 | |||
| 201 | |||
MODULE_AUTHOR ("Dominik Brodowski <linux@brodo.de>, Russell King <rmk@arm.linux.org.uk>");
MODULE_DESCRIPTION ("CPUfreq policy governor 'userspace'");
MODULE_LICENSE ("GPL");

/* NOTE(review): fs_initcall (not module_init) — presumably so the
 * governor is available early during boot for built-in drivers; confirm. */
fs_initcall(cpufreq_gov_userspace_init);
module_exit(cpufreq_gov_userspace_exit);
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c new file mode 100644 index 000000000000..ba460bdea60f --- /dev/null +++ b/drivers/cpufreq/freq_table.c | |||
| @@ -0,0 +1,225 @@ | |||
| 1 | /* | ||
| 2 | * linux/drivers/cpufreq/freq_table.c | ||
| 3 | * | ||
| 4 | * Copyright (C) 2002 - 2003 Dominik Brodowski | ||
| 5 | */ | ||
| 6 | |||
| 7 | #include <linux/kernel.h> | ||
| 8 | #include <linux/module.h> | ||
| 9 | #include <linux/init.h> | ||
| 10 | #include <linux/cpufreq.h> | ||
| 11 | |||
| 12 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "freq-table", msg) | ||
| 13 | |||
| 14 | /********************************************************************* | ||
| 15 | * FREQUENCY TABLE HELPERS * | ||
| 16 | *********************************************************************/ | ||
| 17 | |||
| 18 | int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, | ||
| 19 | struct cpufreq_frequency_table *table) | ||
| 20 | { | ||
| 21 | unsigned int min_freq = ~0; | ||
| 22 | unsigned int max_freq = 0; | ||
| 23 | unsigned int i = 0; | ||
| 24 | |||
| 25 | for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { | ||
| 26 | unsigned int freq = table[i].frequency; | ||
| 27 | if (freq == CPUFREQ_ENTRY_INVALID) { | ||
| 28 | dprintk("table entry %u is invalid, skipping\n", i); | ||
| 29 | |||
| 30 | continue; | ||
| 31 | } | ||
| 32 | dprintk("table entry %u: %u kHz, %u index\n", i, freq, table[i].index); | ||
| 33 | if (freq < min_freq) | ||
| 34 | min_freq = freq; | ||
| 35 | if (freq > max_freq) | ||
| 36 | max_freq = freq; | ||
| 37 | } | ||
| 38 | |||
| 39 | policy->min = policy->cpuinfo.min_freq = min_freq; | ||
| 40 | policy->max = policy->cpuinfo.max_freq = max_freq; | ||
| 41 | |||
| 42 | if (policy->min == ~0) | ||
| 43 | return -EINVAL; | ||
| 44 | else | ||
| 45 | return 0; | ||
| 46 | } | ||
| 47 | EXPORT_SYMBOL_GPL(cpufreq_frequency_table_cpuinfo); | ||
| 48 | |||
| 49 | |||
/*
 * cpufreq_frequency_table_verify - make a policy satisfiable by @table.
 *
 * Clamps policy->min/max to the cpuinfo limits, then checks that at
 * least one valid table entry lies inside [min, max].  If none does,
 * policy->max is raised to the smallest table frequency above the
 * requested max (and clamped again).  Returns 0 on success, -EINVAL
 * if the CPU is offline.
 */
int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
				struct cpufreq_frequency_table *table)
{
	unsigned int next_larger = ~0;
	unsigned int i = 0;
	unsigned int count = 0;

	dprintk("request for verification of policy (%u - %u kHz) for cpu %u\n", policy->min, policy->max, policy->cpu);

	if (!cpu_online(policy->cpu))
		return -EINVAL;

	cpufreq_verify_within_limits(policy,
				policy->cpuinfo.min_freq,
				policy->cpuinfo.max_freq);

	for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
		unsigned int freq = table[i].frequency;
		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		/* Count in-window entries; remember the smallest frequency
		 * above policy->max as a fallback. */
		if ((freq >= policy->min) && (freq <= policy->max))
			count++;
		else if ((next_larger > freq) && (freq > policy->max))
			next_larger = freq;
	}

	/* No usable entry in [min, max]: widen max to the next table
	 * frequency above it.  If there is none, next_larger is still ~0
	 * and the clamp below pulls max back to cpuinfo.max_freq. */
	if (!count)
		policy->max = next_larger;

	cpufreq_verify_within_limits(policy,
				policy->cpuinfo.min_freq,
				policy->cpuinfo.max_freq);

	dprintk("verification lead to (%u - %u kHz) for cpu %u\n", policy->min, policy->max, policy->cpu);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify);
| 88 | |||
| 89 | |||
/*
 * cpufreq_frequency_table_target - pick the table entry best matching
 * @target_freq under @relation.
 *
 * CPUFREQ_RELATION_L: lowest in-policy frequency at or above target.
 * CPUFREQ_RELATION_H: highest in-policy frequency at or below target.
 * If no entry satisfies the relation, the closest entry on the other
 * side of the target ("suboptimal") is used instead.  On success the
 * table index is stored in *index and 0 returned; -EINVAL if the CPU
 * is offline or no in-policy entry exists at all.
 */
int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
				struct cpufreq_frequency_table *table,
				unsigned int target_freq,
				unsigned int relation,
				unsigned int *index)
{
	/* .index = ~0 marks "no candidate found yet"; tested via the
	 * `index > i` comparison after the scan. */
	struct cpufreq_frequency_table optimal = { .index = ~0, };
	struct cpufreq_frequency_table suboptimal = { .index = ~0, };
	unsigned int i;

	dprintk("request for target %u kHz (relation: %u) for cpu %u\n", target_freq, relation, policy->cpu);

	switch (relation) {
	case CPUFREQ_RELATION_H:
		/* optimal tracks a running maximum, suboptimal a minimum. */
		optimal.frequency = 0;
		suboptimal.frequency = ~0;
		break;
	case CPUFREQ_RELATION_L:
		/* Mirror image: optimal is a minimum, suboptimal a maximum. */
		optimal.frequency = ~0;
		suboptimal.frequency = 0;
		break;
	}

	if (!cpu_online(policy->cpu))
		return -EINVAL;

	for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
		unsigned int freq = table[i].frequency;
		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		/* Only frequencies inside the policy limits are eligible. */
		if ((freq < policy->min) || (freq > policy->max))
			continue;
		switch(relation) {
		case CPUFREQ_RELATION_H:
			if (freq <= target_freq) {
				if (freq >= optimal.frequency) {
					optimal.frequency = freq;
					optimal.index = i;
				}
			} else {
				/* Above target: best fallback is the smallest. */
				if (freq <= suboptimal.frequency) {
					suboptimal.frequency = freq;
					suboptimal.index = i;
				}
			}
			break;
		case CPUFREQ_RELATION_L:
			if (freq >= target_freq) {
				if (freq <= optimal.frequency) {
					optimal.frequency = freq;
					optimal.index = i;
				}
			} else {
				/* Below target: best fallback is the largest. */
				if (freq >= suboptimal.frequency) {
					suboptimal.frequency = freq;
					suboptimal.index = i;
				}
			}
			break;
		}
	}
	/* After the loop, i == number of table entries; an index still at
	 * ~0 (hence > i) means that candidate was never assigned. */
	if (optimal.index > i) {
		if (suboptimal.index > i)
			return -EINVAL;
		*index = suboptimal.index;
	} else
		*index = optimal.index;

	dprintk("target is %u (%u kHz, %u)\n", *index, table[*index].frequency,
		table[*index].index);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target);
| 164 | |||
/* Per-CPU table published via scaling_available_frequencies; NULL when unset. */
static struct cpufreq_frequency_table *show_table[NR_CPUS];
| 166 | /** | ||
| 167 | * show_scaling_governor - show the current policy for the specified CPU | ||
| 168 | */ | ||
| 169 | static ssize_t show_available_freqs (struct cpufreq_policy *policy, char *buf) | ||
| 170 | { | ||
| 171 | unsigned int i = 0; | ||
| 172 | unsigned int cpu = policy->cpu; | ||
| 173 | ssize_t count = 0; | ||
| 174 | struct cpufreq_frequency_table *table; | ||
| 175 | |||
| 176 | if (!show_table[cpu]) | ||
| 177 | return -ENODEV; | ||
| 178 | |||
| 179 | table = show_table[cpu]; | ||
| 180 | |||
| 181 | for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { | ||
| 182 | if (table[i].frequency == CPUFREQ_ENTRY_INVALID) | ||
| 183 | continue; | ||
| 184 | count += sprintf(&buf[count], "%d ", table[i].frequency); | ||
| 185 | } | ||
| 186 | count += sprintf(&buf[count], "\n"); | ||
| 187 | |||
| 188 | return count; | ||
| 189 | |||
| 190 | } | ||
| 191 | |||
/* sysfs attribute "scaling_available_frequencies" (ro 0444); drivers
 * include it in their attr list after calling ..._get_attr() below. */
struct freq_attr cpufreq_freq_attr_scaling_available_freqs = {
	.attr = { .name = "scaling_available_frequencies", .mode = 0444, .owner=THIS_MODULE },
	.show = show_available_freqs,
};
EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);
| 197 | |||
/*
 * if you use these, you must assure that the frequency table is valid
 * all the time between get_attr and put_attr!
 */

/* Publish @table as the sysfs-visible frequency table for @cpu. */
void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
				unsigned int cpu)
{
	dprintk("setting show_table for cpu %u to %p\n", cpu, table);
	show_table[cpu] = table;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_attr);
| 209 | |||
/* Withdraw the published table for @cpu (counterpart of get_attr). */
void cpufreq_frequency_table_put_attr(unsigned int cpu)
{
	dprintk("clearing show_table for cpu %u\n", cpu);
	show_table[cpu] = NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr);
| 216 | |||
/* Return the table registered for @cpu, or NULL if none is published. */
struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
{
	return show_table[cpu];
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
| 222 | |||
/* Module metadata; this file only provides helpers, no init of its own. */
MODULE_AUTHOR ("Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION ("CPUfreq frequency table helpers");
MODULE_LICENSE ("GPL");
