Diffstat (limited to 'drivers')
-rw-r--r--  drivers/cpufreq/cpufreq.c               153
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c   33
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c       33
-rw-r--r--  drivers/cpufreq/cpufreq_performance.c     9
-rw-r--r--  drivers/cpufreq/cpufreq_powersave.c       9
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c          11
-rw-r--r--  drivers/cpufreq/cpufreq_userspace.c      11
-rw-r--r--  drivers/cpufreq/freq_table.c             28
8 files changed, 205 insertions(+), 82 deletions(-)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 47ab42db122..9fb2edf3661 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -29,7 +29,8 @@
 #include <linux/completion.h>
 #include <linux/mutex.h>
 
-#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "cpufreq-core", msg)
+#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \
+						"cpufreq-core", msg)
 
 /**
  * The "cpufreq driver" - the arch- or hardware-dependent low
@@ -151,7 +152,8 @@ static void cpufreq_debug_disable_ratelimit(void)
 	spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
 }
 
-void cpufreq_debug_printk(unsigned int type, const char *prefix, const char *fmt, ...)
+void cpufreq_debug_printk(unsigned int type, const char *prefix,
+							const char *fmt, ...)
 {
 	char s[256];
 	va_list args;
@@ -161,7 +163,8 @@ void cpufreq_debug_printk(unsigned int type, const char *prefix, const char *fmt
 	WARN_ON(!prefix);
 	if (type & debug) {
 		spin_lock_irqsave(&disable_ratelimit_lock, flags);
-		if (!disable_ratelimit && debug_ratelimit && !printk_ratelimit()) {
+		if (!disable_ratelimit && debug_ratelimit
+					&& !printk_ratelimit()) {
 			spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
 			return;
 		}
@@ -182,10 +185,12 @@ EXPORT_SYMBOL(cpufreq_debug_printk);
 
 
 module_param(debug, uint, 0644);
-MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core, 2 to debug drivers, and 4 to debug governors.");
+MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core,"
+			" 2 to debug drivers, and 4 to debug governors.");
 
 module_param(debug_ratelimit, uint, 0644);
-MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging: set to 0 to disable ratelimiting.");
+MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging:"
+				" set to 0 to disable ratelimiting.");
 
 #else /* !CONFIG_CPU_FREQ_DEBUG */
 
@@ -219,17 +224,23 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
 	if (!l_p_j_ref_freq) {
 		l_p_j_ref = loops_per_jiffy;
 		l_p_j_ref_freq = ci->old;
-		dprintk("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
+		dprintk("saving %lu as reference value for loops_per_jiffy;"
+			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
 	}
 	if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) ||
 	    (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) ||
 	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
-		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq, ci->new);
-		dprintk("scaling loops_per_jiffy to %lu for frequency %u kHz\n", loops_per_jiffy, ci->new);
+		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
+								ci->new);
+		dprintk("scaling loops_per_jiffy to %lu"
+			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
 	}
 }
 #else
-static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) { return; }
+static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
+{
+	return;
+}
 #endif
 
 
@@ -316,7 +327,8 @@ static int cpufreq_parse_governor (char *str_governor, unsigned int *policy,
 		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
 			*policy = CPUFREQ_POLICY_PERFORMANCE;
 			err = 0;
-		} else if (!strnicmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) {
+		} else if (!strnicmp(str_governor, "powersave",
+						CPUFREQ_NAME_LEN)) {
 			*policy = CPUFREQ_POLICY_POWERSAVE;
 			err = 0;
 		}
@@ -328,7 +340,8 @@ static int cpufreq_parse_governor (char *str_governor, unsigned int *policy,
 		t = __find_governor(str_governor);
 
 		if (t == NULL) {
-			char *name = kasprintf(GFP_KERNEL, "cpufreq_%s", str_governor);
+			char *name = kasprintf(GFP_KERNEL, "cpufreq_%s",
+								str_governor);
 
 			if (name) {
 				int ret;
@@ -361,7 +374,8 @@ extern struct sysdev_class cpu_sysdev_class;
 
 
 /**
- * cpufreq_per_cpu_attr_read() / show_##file_name() - print out cpufreq information
+ * cpufreq_per_cpu_attr_read() / show_##file_name() -
+ * print out cpufreq information
  *
  * Write out information from cpufreq_driver->policy[cpu]; object must be
  * "unsigned int".
@@ -380,7 +394,8 @@ show_one(scaling_min_freq, min);
 show_one(scaling_max_freq, max);
 show_one(scaling_cur_freq, cur);
 
-static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy);
+static int __cpufreq_set_policy(struct cpufreq_policy *data,
+				struct cpufreq_policy *policy);
 
 /**
  * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
@@ -416,7 +431,8 @@ store_one(scaling_max_freq,max);
 /**
  * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
  */
-static ssize_t show_cpuinfo_cur_freq (struct cpufreq_policy * policy, char *buf)
+static ssize_t show_cpuinfo_cur_freq (struct cpufreq_policy * policy,
+							char *buf)
 {
 	unsigned int cur_freq = cpufreq_get(policy->cpu);
 	if (!cur_freq)
@@ -428,7 +444,8 @@ static ssize_t show_cpuinfo_cur_freq (struct cpufreq_policy * policy, char *buf)
 /**
  * show_scaling_governor - show the current policy for the specified CPU
  */
-static ssize_t show_scaling_governor (struct cpufreq_policy * policy, char *buf)
+static ssize_t show_scaling_governor (struct cpufreq_policy * policy,
+							char *buf)
 {
 	if(policy->policy == CPUFREQ_POLICY_POWERSAVE)
 		return sprintf(buf, "powersave\n");
@@ -458,7 +475,8 @@ static ssize_t store_scaling_governor (struct cpufreq_policy * policy,
 	if (ret != 1)
 		return -EINVAL;
 
-	if (cpufreq_parse_governor(str_governor, &new_policy.policy, &new_policy.governor))
+	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
+						&new_policy.governor))
 		return -EINVAL;
 
 	lock_cpu_hotplug();
@@ -474,7 +492,10 @@ static ssize_t store_scaling_governor (struct cpufreq_policy * policy,
 
 	unlock_cpu_hotplug();
 
-	return ret ? ret : count;
+	if (ret)
+		return ret;
+	else
+		return count;
 }
 
 /**
@@ -488,7 +509,7 @@ static ssize_t show_scaling_driver (struct cpufreq_policy * policy, char *buf)
 /**
  * show_scaling_available_governors - show the available CPUfreq governors
  */
-static ssize_t show_scaling_available_governors (struct cpufreq_policy * policy,
+static ssize_t show_scaling_available_governors (struct cpufreq_policy *policy,
 							char *buf)
 {
 	ssize_t i = 0;
@@ -574,7 +595,11 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr ,char * buf)
 	policy = cpufreq_cpu_get(policy->cpu);
 	if (!policy)
 		return -EINVAL;
-	ret = fattr->show ? fattr->show(policy,buf) : -EIO;
+	if (fattr->show)
+		ret = fattr->show(policy, buf);
+	else
+		ret = -EIO;
+
 	cpufreq_cpu_put(policy);
 	return ret;
 }
@@ -588,7 +613,11 @@ static ssize_t store(struct kobject * kobj, struct attribute * attr,
 	policy = cpufreq_cpu_get(policy->cpu);
 	if (!policy)
 		return -EINVAL;
-	ret = fattr->store ? fattr->store(policy,buf,count) : -EIO;
+	if (fattr->store)
+		ret = fattr->store(policy, buf, count);
+	else
+		ret = -EIO;
+
 	cpufreq_cpu_put(policy);
 	return ret;
 }
@@ -913,7 +942,8 @@ static void handle_update(struct work_struct *work)
  * We adjust to current frequency first, and need to clean up later. So either call
  * to cpufreq_update_policy() or schedule handle_update()).
  */
-static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, unsigned int new_freq)
+static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
+				unsigned int new_freq)
 {
 	struct cpufreq_freqs freqs;
 
@@ -938,16 +968,16 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, unsigne
 unsigned int cpufreq_quick_get(unsigned int cpu)
 {
 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
-	unsigned int ret = 0;
+	unsigned int ret_freq = 0;
 
 	if (policy) {
 		mutex_lock(&policy->lock);
-		ret = policy->cur;
+		ret_freq = policy->cur;
 		mutex_unlock(&policy->lock);
 		cpufreq_cpu_put(policy);
 	}
 
-	return (ret);
+	return (ret_freq);
 }
 EXPORT_SYMBOL(cpufreq_quick_get);
 
@@ -961,7 +991,7 @@ EXPORT_SYMBOL(cpufreq_quick_get);
 unsigned int cpufreq_get(unsigned int cpu)
 {
 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
-	unsigned int ret = 0;
+	unsigned int ret_freq = 0;
 
 	if (!policy)
 		return 0;
@@ -971,12 +1001,14 @@ unsigned int cpufreq_get(unsigned int cpu)
 
 	mutex_lock(&policy->lock);
 
-	ret = cpufreq_driver->get(cpu);
+	ret_freq = cpufreq_driver->get(cpu);
 
-	if (ret && policy->cur && !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
-		/* verify no discrepancy between actual and saved value exists */
-		if (unlikely(ret != policy->cur)) {
-			cpufreq_out_of_sync(cpu, policy->cur, ret);
+	if (ret_freq && policy->cur &&
+		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
+		/* verify no discrepancy between actual and
+					saved value exists */
+		if (unlikely(ret_freq != policy->cur)) {
+			cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
 			schedule_work(&policy->update);
 		}
 	}
@@ -986,7 +1018,7 @@ unsigned int cpufreq_get(unsigned int cpu)
 out:
 	cpufreq_cpu_put(policy);
 
-	return (ret);
+	return (ret_freq);
 }
 EXPORT_SYMBOL(cpufreq_get);
 
@@ -998,7 +1030,7 @@ EXPORT_SYMBOL(cpufreq_get);
 static int cpufreq_suspend(struct sys_device * sysdev, pm_message_t pmsg)
 {
 	int cpu = sysdev->id;
-	unsigned int ret = 0;
+	int ret = 0;
 	unsigned int cur_freq = 0;
 	struct cpufreq_policy *cpu_policy;
 
@@ -1080,7 +1112,7 @@ out:
 static int cpufreq_resume(struct sys_device * sysdev)
 {
 	int cpu = sysdev->id;
-	unsigned int ret = 0;
+	int ret = 0;
 	struct cpufreq_policy *cpu_policy;
 
 	dprintk("resuming cpu %u\n", cpu);
@@ -1276,22 +1308,45 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
 }
 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
 
+int cpufreq_driver_getavg(struct cpufreq_policy *policy)
+{
+	int ret = 0;
+
+	policy = cpufreq_cpu_get(policy->cpu);
+	if (!policy)
+		return -EINVAL;
+
+	mutex_lock(&policy->lock);
+
+	if (cpu_online(policy->cpu) && cpufreq_driver->getavg)
+		ret = cpufreq_driver->getavg(policy->cpu);
+
+	mutex_unlock(&policy->lock);
+
+	cpufreq_cpu_put(policy);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(cpufreq_driver_getavg);
+
 /*
  * Locking: Must be called with the lock_cpu_hotplug() lock held
  * when "event" is CPUFREQ_GOV_LIMITS
  */
 
-static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
+static int __cpufreq_governor(struct cpufreq_policy *policy,
+					unsigned int event)
 {
 	int ret;
 
 	if (!try_module_get(policy->governor->owner))
 		return -EINVAL;
 
-	dprintk("__cpufreq_governor for CPU %u, event %u\n", policy->cpu, event);
+	dprintk("__cpufreq_governor for CPU %u, event %u\n",
+						policy->cpu, event);
 	ret = policy->governor->governor(policy, event);
 
-	/* we keep one module reference alive for each CPU governed by this CPU */
+	/* we keep one module reference alive for
+			each CPU governed by this CPU */
 	if ((event != CPUFREQ_GOV_START) || ret)
 		module_put(policy->governor->owner);
 	if ((event == CPUFREQ_GOV_STOP) && !ret)
@@ -1367,9 +1422,12 @@ EXPORT_SYMBOL(cpufreq_get_policy);
 
 
 /*
+ * data   : current policy.
+ * policy : policy to be set.
  * Locking: Must be called with the lock_cpu_hotplug() lock held
  */
-static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy)
+static int __cpufreq_set_policy(struct cpufreq_policy *data,
+				struct cpufreq_policy *policy)
 {
 	int ret = 0;
 
@@ -1377,7 +1435,8 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_poli
 	dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
 		policy->min, policy->max);
 
-	memcpy(&policy->cpuinfo, &data->cpuinfo, sizeof(struct cpufreq_cpuinfo));
+	memcpy(&policy->cpuinfo, &data->cpuinfo,
+				sizeof(struct cpufreq_cpuinfo));
 
 	if (policy->min > data->min && policy->min > policy->max) {
 		ret = -EINVAL;
@@ -1410,7 +1469,8 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_poli
 	data->min = policy->min;
 	data->max = policy->max;
 
-	dprintk("new min and max freqs are %u - %u kHz\n", data->min, data->max);
+	dprintk("new min and max freqs are %u - %u kHz\n",
+					data->min, data->max);
 
 	if (cpufreq_driver->setpolicy) {
 		data->policy = policy->policy;
@@ -1431,10 +1491,12 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_poli
 			data->governor = policy->governor;
 			if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
 				/* new governor failed, so re-start old one */
-				dprintk("starting governor %s failed\n", data->governor->name);
+				dprintk("starting governor %s failed\n",
+							data->governor->name);
 				if (old_gov) {
 					data->governor = old_gov;
-					__cpufreq_governor(data, CPUFREQ_GOV_START);
+					__cpufreq_governor(data,
+							   CPUFREQ_GOV_START);
 				}
 				ret = -EINVAL;
 				goto error_out;
@@ -1524,7 +1586,8 @@ int cpufreq_update_policy(unsigned int cpu)
 			data->cur = policy.cur;
 		} else {
 			if (data->cur != policy.cur)
-				cpufreq_out_of_sync(cpu, data->cur, policy.cur);
+				cpufreq_out_of_sync(cpu, data->cur,
+								policy.cur);
 		}
 	}
 
@@ -1626,8 +1689,10 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 
 	/* if all ->init() calls failed, unregister */
 	if (ret) {
-		dprintk("no CPU initialized for driver %s\n", driver_data->name);
-		sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
+		dprintk("no CPU initialized for driver %s\n",
+							driver_data->name);
+		sysdev_driver_unregister(&cpu_sysdev_class,
+						&cpufreq_sysdev_driver);
 
 		spin_lock_irqsave(&cpufreq_driver_lock, flags);
 		cpufreq_driver = NULL;
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 5ef5ede5b88..eef0270c6f3 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -44,15 +44,17 @@
  * latency of the processor. The governor will work on any processor with
  * transition latency <= 10mS, using appropriate sampling
  * rate.
- * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
- * this governor will not work.
+ * For CPUs with transition latency > 10mS (mostly drivers
+ * with CPUFREQ_ETERNAL), this governor will not work.
  * All times here are in uS.
  */
 static unsigned int def_sampling_rate;
 #define MIN_SAMPLING_RATE_RATIO (2)
 /* for correct statistics, we need at least 10 ticks between each measure */
-#define MIN_STAT_SAMPLING_RATE (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
-#define MIN_SAMPLING_RATE (def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
+#define MIN_STAT_SAMPLING_RATE \
+			(MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
+#define MIN_SAMPLING_RATE \
+			(def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
 #define MAX_SAMPLING_RATE (500 * def_sampling_rate)
 #define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000)
 #define DEF_SAMPLING_DOWN_FACTOR (1)
@@ -103,11 +105,16 @@ static struct dbs_tuners dbs_tuners_ins = {
 
 static inline unsigned int get_cpu_idle_time(unsigned int cpu)
 {
-	return kstat_cpu(cpu).cpustat.idle +
+	unsigned int add_nice = 0, ret;
+
+	if (dbs_tuners_ins.ignore_nice)
+		add_nice = kstat_cpu(cpu).cpustat.nice;
+
+	ret = kstat_cpu(cpu).cpustat.idle +
 		kstat_cpu(cpu).cpustat.iowait +
-		( dbs_tuners_ins.ignore_nice ?
-			kstat_cpu(cpu).cpustat.nice :
-			0);
+		add_nice;
+
+	return ret;
 }
 
 /************************** sysfs interface ************************/
@@ -452,6 +459,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 	unsigned int cpu = policy->cpu;
 	struct cpu_dbs_info_s *this_dbs_info;
 	unsigned int j;
+	int rc;
 
 	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
 
@@ -468,6 +476,13 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			break;
 
 		mutex_lock(&dbs_mutex);
+
+		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
+		if (rc) {
+			mutex_unlock(&dbs_mutex);
+			return rc;
+		}
+
 		for_each_cpu_mask(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
@@ -480,7 +495,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		this_dbs_info->enable = 1;
 		this_dbs_info->down_skip = 0;
 		this_dbs_info->requested_freq = policy->cur;
-		sysfs_create_group(&policy->kobj, &dbs_attr_group);
+
 		dbs_enable++;
 		/*
 		 * Start the timerschedule work, when this governor
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index e1cc5113c2a..f697449327c 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -41,8 +41,10 @@
 static unsigned int def_sampling_rate;
 #define MIN_SAMPLING_RATE_RATIO (2)
 /* for correct statistics, we need at least 10 ticks between each measure */
-#define MIN_STAT_SAMPLING_RATE (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
-#define MIN_SAMPLING_RATE (def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
+#define MIN_STAT_SAMPLING_RATE \
+			(MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
+#define MIN_SAMPLING_RATE \
+			(def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
 #define MAX_SAMPLING_RATE (500 * def_sampling_rate)
 #define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000)
 #define TRANSITION_LATENCY_LIMIT (10 * 1000)
@@ -206,7 +208,8 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
 	ret = sscanf(buf, "%u", &input);
 
 	mutex_lock(&dbs_mutex);
-	if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) {
+	if (ret != 1 || input > MAX_SAMPLING_RATE
+		     || input < MIN_SAMPLING_RATE) {
 		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
 	}
@@ -397,8 +400,15 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 	 * policy. To be safe, we focus 10 points under the threshold.
 	 */
 	if (load < (dbs_tuners_ins.up_threshold - 10)) {
-		unsigned int freq_next = (policy->cur * load) /
+		unsigned int freq_next, freq_cur;
+
+		freq_cur = cpufreq_driver_getavg(policy);
+		if (!freq_cur)
+			freq_cur = policy->cur;
+
+		freq_next = (freq_cur * load) /
 				(dbs_tuners_ins.up_threshold - 10);
+
 		if (!dbs_tuners_ins.powersave_bias) {
 			__cpufreq_driver_target(policy, freq_next,
 						CPUFREQ_RELATION_L);
@@ -472,6 +482,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 	unsigned int cpu = policy->cpu;
 	struct cpu_dbs_info_s *this_dbs_info;
 	unsigned int j;
+	int rc;
 
 	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
 
@@ -494,12 +505,23 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		if (dbs_enable == 1) {
 			kondemand_wq = create_workqueue("kondemand");
 			if (!kondemand_wq) {
-				printk(KERN_ERR "Creation of kondemand failed\n");
+				printk(KERN_ERR
+					"Creation of kondemand failed\n");
 				dbs_enable--;
 				mutex_unlock(&dbs_mutex);
 				return -ENOSPC;
 			}
 		}
+
+		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
+		if (rc) {
+			if (dbs_enable == 1)
+				destroy_workqueue(kondemand_wq);
+			dbs_enable--;
+			mutex_unlock(&dbs_mutex);
+			return rc;
+		}
+
 		for_each_cpu_mask(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
@@ -509,7 +531,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			j_dbs_info->prev_cpu_wall = get_jiffies_64();
 		}
 		this_dbs_info->enable = 1;
-		sysfs_create_group(&policy->kobj, &dbs_attr_group);
 		/*
 		 * Start the timerschedule work, when this governor
 		 * is used for first time
diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c
index de91e3371ef..e8e1451ef1c 100644
--- a/drivers/cpufreq/cpufreq_performance.c
+++ b/drivers/cpufreq/cpufreq_performance.c
@@ -15,7 +15,8 @@
 #include <linux/cpufreq.h>
 #include <linux/init.h>
 
-#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "performance", msg)
+#define dprintk(msg...) \
+	cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "performance", msg)
 
 
 static int cpufreq_governor_performance(struct cpufreq_policy *policy,
@@ -24,8 +25,10 @@ static int cpufreq_governor_performance(struct cpufreq_policy *policy,
 	switch (event) {
 	case CPUFREQ_GOV_START:
 	case CPUFREQ_GOV_LIMITS:
-		dprintk("setting to %u kHz because of event %u\n", policy->max, event);
-		__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
+		dprintk("setting to %u kHz because of event %u\n",
+						policy->max, event);
+		__cpufreq_driver_target(policy, policy->max,
+						CPUFREQ_RELATION_H);
 		break;
 	default:
 		break;
diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c
index 0a2596044e6..13fe06b94b0 100644
--- a/drivers/cpufreq/cpufreq_powersave.c
+++ b/drivers/cpufreq/cpufreq_powersave.c
@@ -15,7 +15,8 @@
 #include <linux/cpufreq.h>
 #include <linux/init.h>
 
-#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "powersave", msg)
+#define dprintk(msg...) \
+	cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "powersave", msg)
 
 static int cpufreq_governor_powersave(struct cpufreq_policy *policy,
 					unsigned int event)
@@ -23,8 +24,10 @@ static int cpufreq_governor_powersave(struct cpufreq_policy *policy,
 	switch (event) {
 	case CPUFREQ_GOV_START:
 	case CPUFREQ_GOV_LIMITS:
-		dprintk("setting to %u kHz because of event %u\n", policy->min, event);
-		__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
+		dprintk("setting to %u kHz because of event %u\n",
+						policy->min, event);
+		__cpufreq_driver_target(policy, policy->min,
+						CPUFREQ_RELATION_L);
 		break;
 	default:
 		break;
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index c2ecc599dc5..6742b1adf2c 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -351,8 +351,8 @@ __init cpufreq_stats_init(void)
 
 	register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
 	for_each_online_cpu(cpu) {
-		cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, CPU_ONLINE,
-			(void *)(long)cpu);
+		cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier,
+						CPU_ONLINE, (void *)(long)cpu);
 	}
 	return 0;
 }
@@ -368,14 +368,15 @@ __exit cpufreq_stats_exit(void)
 	unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
 	lock_cpu_hotplug();
 	for_each_online_cpu(cpu) {
-		cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, CPU_DEAD,
-			(void *)(long)cpu);
+		cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier,
+						CPU_DEAD, (void *)(long)cpu);
 	}
 	unlock_cpu_hotplug();
 }
 
 MODULE_AUTHOR ("Zou Nan hai <nanhai.zou@intel.com>");
-MODULE_DESCRIPTION ("'cpufreq_stats' - A driver to export cpufreq stats through sysfs filesystem");
+MODULE_DESCRIPTION ("'cpufreq_stats' - A driver to export cpufreq stats"
+			"through sysfs filesystem");
 MODULE_LICENSE ("GPL");
 
 module_init(cpufreq_stats_init);
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index a06c204589c..2a4eb0bfaf3 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -131,19 +131,26 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
 				   unsigned int event)
 {
 	unsigned int cpu = policy->cpu;
+	int rc = 0;
+
 	switch (event) {
 	case CPUFREQ_GOV_START:
 		if (!cpu_online(cpu))
 			return -EINVAL;
 		BUG_ON(!policy->cur);
 		mutex_lock(&userspace_mutex);
+		rc = sysfs_create_file (&policy->kobj,
+					&freq_attr_scaling_setspeed.attr);
+		if (rc)
+			goto start_out;
+
 		cpu_is_managed[cpu] = 1;
 		cpu_min_freq[cpu] = policy->min;
 		cpu_max_freq[cpu] = policy->max;
 		cpu_cur_freq[cpu] = policy->cur;
 		cpu_set_freq[cpu] = policy->cur;
-		sysfs_create_file (&policy->kobj, &freq_attr_scaling_setspeed.attr);
 		dprintk("managing cpu %u started (%u - %u kHz, currently %u kHz)\n", cpu, cpu_min_freq[cpu], cpu_max_freq[cpu], cpu_cur_freq[cpu]);
+start_out:
 		mutex_unlock(&userspace_mutex);
 		break;
 	case CPUFREQ_GOV_STOP:
@@ -180,7 +187,7 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
 		mutex_unlock(&userspace_mutex);
 		break;
 	}
-	return 0;
+	return rc;
 }
 
 
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 551f4ccf87f..e7490925fdc 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -9,7 +9,8 @@
 #include <linux/init.h>
 #include <linux/cpufreq.h>
 
-#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "freq-table", msg)
+#define dprintk(msg...) \
+	cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "freq-table", msg)
 
 /*********************************************************************
  *                     FREQUENCY TABLE HELPERS                       *
@@ -29,7 +30,8 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
 
 			continue;
 		}
-		dprintk("table entry %u: %u kHz, %u index\n", i, freq, table[i].index);
+		dprintk("table entry %u: %u kHz, %u index\n",
+					i, freq, table[i].index);
 		if (freq < min_freq)
 			min_freq = freq;
 		if (freq > max_freq)
@@ -54,13 +56,14 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
 	unsigned int i;
 	unsigned int count = 0;
 
-	dprintk("request for verification of policy (%u - %u kHz) for cpu %u\n", policy->min, policy->max, policy->cpu);
+	dprintk("request for verification of policy (%u - %u kHz) for cpu %u\n",
+					policy->min, policy->max, policy->cpu);
 
 	if (!cpu_online(policy->cpu))
 		return -EINVAL;
 
-	cpufreq_verify_within_limits(policy,
-		policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
+	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+				     policy->cpuinfo.max_freq);
 
 	for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
 		unsigned int freq = table[i].frequency;
@@ -75,10 +78,11 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
 	if (!count)
 		policy->max = next_larger;
 
-	cpufreq_verify_within_limits(policy,
-		policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
+	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+				     policy->cpuinfo.max_freq);
 
-	dprintk("verification lead to (%u - %u kHz) for cpu %u\n", policy->min, policy->max, policy->cpu);
+	dprintk("verification lead to (%u - %u kHz) for cpu %u\n",
+				policy->min, policy->max, policy->cpu);
 
 	return 0;
 }
@@ -101,7 +105,8 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
 	};
 	unsigned int i;
 
-	dprintk("request for target %u kHz (relation: %u) for cpu %u\n", target_freq, relation, policy->cpu);
+	dprintk("request for target %u kHz (relation: %u) for cpu %u\n",
+					target_freq, relation, policy->cpu);
 
 	switch (relation) {
 	case CPUFREQ_RELATION_H:
@@ -192,7 +197,10 @@ static ssize_t show_available_freqs (struct cpufreq_policy *policy, char *buf)
 }
 
 struct freq_attr cpufreq_freq_attr_scaling_available_freqs = {
-	.attr = { .name = "scaling_available_frequencies", .mode = 0444, .owner=THIS_MODULE },
+	.attr = { .name = "scaling_available_frequencies",
+		  .mode = 0444,
+		  .owner=THIS_MODULE
+		},
 	.show = show_available_freqs,
 };
 EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);