author	Linus Torvalds <torvalds@linux-foundation.org>	2009-03-26 14:04:08 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-03-26 14:04:08 -0400
commit	ada19a31a90b4f46c040c25ef4ef8ffc203c7fc6 (patch)
tree	7d523d2d90dbaa973c5843d6219ec149b5949243 /drivers/cpufreq
parent	8d80ce80e1d58ba9cd3e3972b112cccd6b4008f4 (diff)
parent	36e8abf3edcd2d207193ec5741d1a2a645d470a5 (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq: (35 commits)
  [CPUFREQ] Prevent p4-clockmod from auto-binding to the ondemand governor.
  [CPUFREQ] Make cpufreq-nforce2 less obnoxious
  [CPUFREQ] p4-clockmod reports wrong frequency.
  [CPUFREQ] powernow-k8: Use a common exit path.
  [CPUFREQ] Change link order of x86 cpufreq modules
  [CPUFREQ] conservative: remove 10x from def_sampling_rate
  [CPUFREQ] conservative: fixup governor to function more like ondemand logic
  [CPUFREQ] conservative: fix dbs_cpufreq_notifier so freq is not locked
  [CPUFREQ] conservative: amend author's email address
  [CPUFREQ] Use swap() in longhaul.c
  [CPUFREQ] checkpatch cleanups for acpi-cpufreq
  [CPUFREQ] powernow-k8: Only print error message once, not per core.
  [CPUFREQ] ondemand/conservative: sanitize sampling_rate restrictions
  [CPUFREQ] ondemand/conservative: deprecate sampling_rate{min,max}
  [CPUFREQ] powernow-k8: Always compile powernow-k8 driver with ACPI support
  [CPUFREQ] Introduce /sys/devices/system/cpu/cpu*/cpufreq/cpuinfo_transition_latency
  [CPUFREQ] checkpatch cleanups for powernow-k8
  [CPUFREQ] checkpatch cleanups for ondemand governor.
  [CPUFREQ] checkpatch cleanups for powernow-k7
  [CPUFREQ] checkpatch cleanups for speedstep related drivers.
  ...
Diffstat (limited to 'drivers/cpufreq')
 drivers/cpufreq/cpufreq.c              |  55
 drivers/cpufreq/cpufreq_conservative.c | 404
 drivers/cpufreq/cpufreq_ondemand.c     |  74
 drivers/cpufreq/cpufreq_stats.c        |  74
 drivers/cpufreq/cpufreq_userspace.c    |  27
 drivers/cpufreq/freq_table.c           |  18
6 files changed, 379 insertions(+), 273 deletions(-)
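
Among the changes merged here, cpufreq.c gains a new sysfs attribute, cpuinfo_transition_latency (see the first diff below). As a quick orientation, here is a minimal userspace sketch — not part of the patch — that reads the new attribute for cpu0; the path follows the shortlog, and the value is the driver's cpuinfo.transition_latency in nanoseconds:

#include <stdio.h>

int main(void)
{
	unsigned int latency_ns;
	/* Attribute only exists on kernels that contain this merge. */
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/"
			"cpuinfo_transition_latency", "r");

	if (!f) {
		perror("cpuinfo_transition_latency");
		return 1;
	}
	if (fscanf(f, "%u", &latency_ns) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("transition latency: %u ns\n", latency_ns);
	return 0;
}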
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index d6daf3c507d3..d270e8eb3e67 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -104,7 +104,8 @@ EXPORT_SYMBOL_GPL(unlock_policy_rwsem_write);
 
 
 /* internal prototypes */
-static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
+static int __cpufreq_governor(struct cpufreq_policy *policy,
+		unsigned int event);
 static unsigned int __cpufreq_get(unsigned int cpu);
 static void handle_update(struct work_struct *work);
 
@@ -128,7 +129,7 @@ static int __init init_cpufreq_transition_notifier_list(void)
 pure_initcall(init_cpufreq_transition_notifier_list);
 
 static LIST_HEAD(cpufreq_governor_list);
-static DEFINE_MUTEX (cpufreq_governor_mutex);
+static DEFINE_MUTEX(cpufreq_governor_mutex);
 
 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
 {
@@ -371,7 +372,7 @@ static struct cpufreq_governor *__find_governor(const char *str_governor)
 	struct cpufreq_governor *t;
 
 	list_for_each_entry(t, &cpufreq_governor_list, governor_list)
-		if (!strnicmp(str_governor,t->name,CPUFREQ_NAME_LEN))
+		if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
 			return t;
 
 	return NULL;
@@ -429,15 +430,11 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
 
 		mutex_unlock(&cpufreq_governor_mutex);
 	}
-  out:
+out:
 	return err;
 }
 
 
-/* drivers/base/cpu.c */
-extern struct sysdev_class cpu_sysdev_class;
-
-
 /**
  * cpufreq_per_cpu_attr_read() / show_##file_name() -
  * print out cpufreq information
@@ -450,11 +447,12 @@ extern struct sysdev_class cpu_sysdev_class;
 static ssize_t show_##file_name				\
 (struct cpufreq_policy *policy, char *buf)		\
 {							\
-	return sprintf (buf, "%u\n", policy->object);	\
+	return sprintf(buf, "%u\n", policy->object);	\
 }
 
 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
+show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
 show_one(scaling_min_freq, min);
 show_one(scaling_max_freq, max);
 show_one(scaling_cur_freq, cur);
@@ -476,7 +474,7 @@ static ssize_t store_##file_name				\
 	if (ret)					\
 		return -EINVAL;				\
 							\
-	ret = sscanf (buf, "%u", &new_policy.object);	\
+	ret = sscanf(buf, "%u", &new_policy.object);	\
 	if (ret != 1)					\
 		return -EINVAL;				\
 							\
@@ -486,8 +484,8 @@ static ssize_t store_##file_name				\
 	return ret ? ret : count;			\
 }
 
-store_one(scaling_min_freq,min);
-store_one(scaling_max_freq,max);
+store_one(scaling_min_freq, min);
+store_one(scaling_max_freq, max);
 
 /**
  * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
@@ -507,12 +505,13 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
  */
 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
 {
-	if(policy->policy == CPUFREQ_POLICY_POWERSAVE)
+	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
 		return sprintf(buf, "powersave\n");
 	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
 		return sprintf(buf, "performance\n");
 	else if (policy->governor)
-		return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", policy->governor->name);
+		return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n",
+				policy->governor->name);
 	return -EINVAL;
 }
 
@@ -531,7 +530,7 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
 	if (ret)
 		return ret;
 
-	ret = sscanf (buf, "%15s", str_governor);
+	ret = sscanf(buf, "%15s", str_governor);
 	if (ret != 1)
 		return -EINVAL;
 
@@ -575,7 +574,8 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
 	}
 
 	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
-		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) - (CPUFREQ_NAME_LEN + 2)))
+		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
+		    - (CPUFREQ_NAME_LEN + 2)))
 			goto out;
 		i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name);
 	}
@@ -594,7 +594,7 @@ static ssize_t show_cpus(const struct cpumask *mask, char *buf)
 		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
 		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
 		if (i >= (PAGE_SIZE - 5))
-		    break;
+			break;
 	}
 	i += sprintf(&buf[i], "\n");
 	return i;
@@ -660,6 +660,7 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
 define_one_ro0400(cpuinfo_cur_freq);
 define_one_ro(cpuinfo_min_freq);
 define_one_ro(cpuinfo_max_freq);
+define_one_ro(cpuinfo_transition_latency);
 define_one_ro(scaling_available_governors);
 define_one_ro(scaling_driver);
 define_one_ro(scaling_cur_freq);
@@ -673,6 +674,7 @@ define_one_rw(scaling_setspeed);
 static struct attribute *default_attrs[] = {
 	&cpuinfo_min_freq.attr,
 	&cpuinfo_max_freq.attr,
+	&cpuinfo_transition_latency.attr,
 	&scaling_min_freq.attr,
 	&scaling_max_freq.attr,
 	&affected_cpus.attr,
@@ -684,10 +686,10 @@ static struct attribute *default_attrs[] = {
 	NULL
 };
 
-#define to_policy(k) container_of(k,struct cpufreq_policy,kobj)
-#define to_attr(a) container_of(a,struct freq_attr,attr)
+#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
+#define to_attr(a) container_of(a, struct freq_attr, attr)
 
-static ssize_t show(struct kobject *kobj, struct attribute *attr ,char *buf)
+static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
 {
 	struct cpufreq_policy *policy = to_policy(kobj);
 	struct freq_attr *fattr = to_attr(attr);
@@ -853,10 +855,10 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 		if (cpu == j)
 			continue;
 
-		/* check for existing affected CPUs. They may not be aware
-		 * of it due to CPU Hotplug.
+		/* Check for existing affected CPUs.
+		 * They may not be aware of it due to CPU Hotplug.
 		 */
-		managed_policy = cpufreq_cpu_get(j);		// FIXME: Where is this released? What about error paths?
+		managed_policy = cpufreq_cpu_get(j);		/* FIXME: Where is this released? What about error paths? */
 		if (unlikely(managed_policy)) {
 
 			/* Set proper policy_cpu */
@@ -1127,8 +1129,8 @@ static void handle_update(struct work_struct *work)
  * @old_freq: CPU frequency the kernel thinks the CPU runs at
  * @new_freq: CPU frequency the CPU actually runs at
  *
- * We adjust to current frequency first, and need to clean up later. So either call
- * to cpufreq_update_policy() or schedule handle_update()).
+ * We adjust to current frequency first, and need to clean up later.
+ * So either call to cpufreq_update_policy() or schedule handle_update()).
  */
 static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
 				unsigned int new_freq)
@@ -1610,7 +1612,8 @@ EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
 
 /**
  * cpufreq_get_policy - get the current cpufreq_policy
- * @policy: struct cpufreq_policy into which the current cpufreq_policy is written
+ * @policy: struct cpufreq_policy into which the current cpufreq_policy
+ *	is written
 *
 * Reads the current cpufreq policy.
 */
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 0320962c4ec5..2ecd95e4ab1a 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -4,7 +4,7 @@
  *  Copyright (C)  2001 Russell King
  *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
  *                      Jun Nakajima <jun.nakajima@intel.com>
- *            (C)  2004 Alexander Clouter <alex-kernel@digriz.org.uk>
+ *            (C)  2009 Alexander Clouter <alex@digriz.org.uk>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -13,22 +13,17 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/smp.h>
 #include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/ctype.h>
 #include <linux/cpufreq.h>
-#include <linux/sysctl.h>
-#include <linux/types.h>
-#include <linux/fs.h>
-#include <linux/sysfs.h>
 #include <linux/cpu.h>
-#include <linux/kmod.h>
-#include <linux/workqueue.h>
 #include <linux/jiffies.h>
 #include <linux/kernel_stat.h>
-#include <linux/percpu.h>
 #include <linux/mutex.h>
+#include <linux/hrtimer.h>
+#include <linux/tick.h>
+#include <linux/ktime.h>
+#include <linux/sched.h>
+
 /*
  * dbs is used in this file as a shortform for demandbased switching
  * It helps to keep variable names smaller, simpler
@@ -43,19 +38,31 @@
  * latency of the processor. The governor will work on any processor with
  * transition latency <= 10mS, using appropriate sampling
  * rate.
- * For CPUs with transition latency > 10mS (mostly drivers
- * with CPUFREQ_ETERNAL), this governor will not work.
+ * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
+ * this governor will not work.
  * All times here are in uS.
 */
 static unsigned int def_sampling_rate;
 #define MIN_SAMPLING_RATE_RATIO			(2)
 /* for correct statistics, we need at least 10 ticks between each measure */
 #define MIN_STAT_SAMPLING_RATE			\
 		(MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
 #define MIN_SAMPLING_RATE			\
 		(def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
+/* Above MIN_SAMPLING_RATE will vanish with its sysfs file soon
+ * Define the minimal settable sampling rate to the greater of:
+ *  - "HW transition latency" * 100 (same as default sampling / 10)
+ *  - MIN_STAT_SAMPLING_RATE
+ * To avoid that userspace shoots itself.
+*/
+static unsigned int minimum_sampling_rate(void)
+{
+	return max(def_sampling_rate / 10, MIN_STAT_SAMPLING_RATE);
+}
+
+/* This will also vanish soon with removing sampling_rate_max */
 #define MAX_SAMPLING_RATE			(500 * def_sampling_rate)
-#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER	(1000)
+#define LATENCY_MULTIPLIER			(1000)
 #define DEF_SAMPLING_DOWN_FACTOR		(1)
 #define MAX_SAMPLING_DOWN_FACTOR		(10)
 #define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)
@@ -63,12 +70,15 @@ static unsigned int def_sampling_rate;
 static void do_dbs_timer(struct work_struct *work);
 
 struct cpu_dbs_info_s {
+	cputime64_t prev_cpu_idle;
+	cputime64_t prev_cpu_wall;
+	cputime64_t prev_cpu_nice;
 	struct cpufreq_policy *cur_policy;
-	unsigned int prev_cpu_idle_up;
-	unsigned int prev_cpu_idle_down;
-	unsigned int enable;
+	struct delayed_work work;
 	unsigned int down_skip;
 	unsigned int requested_freq;
+	int cpu;
+	unsigned int enable:1;
 };
 static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
@@ -82,19 +92,18 @@ static unsigned int dbs_enable;	/* number of CPUs using this policy */
 * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
 * is recursive for the same process. -Venki
 */
-static DEFINE_MUTEX (dbs_mutex);
-static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer);
+static DEFINE_MUTEX(dbs_mutex);
 
-struct dbs_tuners {
+static struct workqueue_struct	*kconservative_wq;
+
+static struct dbs_tuners {
 	unsigned int sampling_rate;
 	unsigned int sampling_down_factor;
 	unsigned int up_threshold;
 	unsigned int down_threshold;
 	unsigned int ignore_nice;
 	unsigned int freq_step;
-};
-
-static struct dbs_tuners dbs_tuners_ins = {
+} dbs_tuners_ins = {
 	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
 	.down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
 	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
@@ -102,18 +111,37 @@ static struct dbs_tuners dbs_tuners_ins = {
 	.freq_step = 5,
 };
 
-static inline unsigned int get_cpu_idle_time(unsigned int cpu)
+static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
+							cputime64_t *wall)
 {
-	unsigned int add_nice = 0, ret;
+	cputime64_t idle_time;
+	cputime64_t cur_wall_time;
+	cputime64_t busy_time;
 
-	if (dbs_tuners_ins.ignore_nice)
-		add_nice = kstat_cpu(cpu).cpustat.nice;
+	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
+	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
+			kstat_cpu(cpu).cpustat.system);
+
+	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
+	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
+	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
+	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
+
+	idle_time = cputime64_sub(cur_wall_time, busy_time);
+	if (wall)
+		*wall = cur_wall_time;
+
+	return idle_time;
+}
+
+static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
+{
+	u64 idle_time = get_cpu_idle_time_us(cpu, wall);
 
-	ret = kstat_cpu(cpu).cpustat.idle +
-		kstat_cpu(cpu).cpustat.iowait +
-		add_nice;
+	if (idle_time == -1ULL)
+		return get_cpu_idle_time_jiffy(cpu, wall);
 
-	return ret;
+	return idle_time;
 }
 
 /* keep track of frequency transitions */
@@ -125,10 +153,21 @@ dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info,
 							freq->cpu);
 
+	struct cpufreq_policy *policy;
+
 	if (!this_dbs_info->enable)
 		return 0;
 
-	this_dbs_info->requested_freq = freq->new;
+	policy = this_dbs_info->cur_policy;
+
+	/*
+	 * we only care if our internally tracked freq moves outside
+	 * the 'valid' ranges of freqency available to us otherwise
+	 * we do not change it
+	*/
+	if (this_dbs_info->requested_freq > policy->max
+			|| this_dbs_info->requested_freq < policy->min)
+		this_dbs_info->requested_freq = freq->new;
 
 	return 0;
 }
@@ -140,16 +179,31 @@ static struct notifier_block dbs_cpufreq_notifier_block = {
 /************************** sysfs interface ************************/
 static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
 {
-	return sprintf (buf, "%u\n", MAX_SAMPLING_RATE);
+	static int print_once;
+
+	if (!print_once) {
+		printk(KERN_INFO "CPUFREQ: conservative sampling_rate_max "
+				"sysfs file is deprecated - used by: %s\n",
+				current->comm);
+		print_once = 1;
+	}
+	return sprintf(buf, "%u\n", MAX_SAMPLING_RATE);
 }
 
 static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
 {
-	return sprintf (buf, "%u\n", MIN_SAMPLING_RATE);
+	static int print_once;
+
+	if (!print_once) {
+		printk(KERN_INFO "CPUFREQ: conservative sampling_rate_max "
+				"sysfs file is deprecated - used by: %s\n", current->comm);
+		print_once = 1;
+	}
+	return sprintf(buf, "%u\n", MIN_SAMPLING_RATE);
 }
 
 #define define_one_ro(_name)		\
 static struct freq_attr _name =		\
 __ATTR(_name, 0444, show_##_name, NULL)
 
 define_one_ro(sampling_rate_max);
@@ -174,7 +228,8 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
 {
 	unsigned int input;
 	int ret;
-	ret = sscanf (buf, "%u", &input);
+	ret = sscanf(buf, "%u", &input);
+
 	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
 		return -EINVAL;
 
@@ -190,15 +245,13 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
 {
 	unsigned int input;
 	int ret;
-	ret = sscanf (buf, "%u", &input);
+	ret = sscanf(buf, "%u", &input);
 
-	mutex_lock(&dbs_mutex);
-	if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) {
-		mutex_unlock(&dbs_mutex);
+	if (ret != 1)
 		return -EINVAL;
-	}
 
-	dbs_tuners_ins.sampling_rate = input;
+	mutex_lock(&dbs_mutex);
+	dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
 	mutex_unlock(&dbs_mutex);
 
 	return count;
@@ -209,10 +262,11 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 {
 	unsigned int input;
 	int ret;
-	ret = sscanf (buf, "%u", &input);
+	ret = sscanf(buf, "%u", &input);
 
 	mutex_lock(&dbs_mutex);
-	if (ret != 1 || input > 100 || input <= dbs_tuners_ins.down_threshold) {
+	if (ret != 1 || input > 100 ||
+			input <= dbs_tuners_ins.down_threshold) {
 		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
 	}
@@ -228,10 +282,12 @@ static ssize_t store_down_threshold(struct cpufreq_policy *unused,
 {
 	unsigned int input;
 	int ret;
-	ret = sscanf (buf, "%u", &input);
+	ret = sscanf(buf, "%u", &input);
 
 	mutex_lock(&dbs_mutex);
-	if (ret != 1 || input > 100 || input >= dbs_tuners_ins.up_threshold) {
+	/* cannot be lower than 11 otherwise freq will not fall */
+	if (ret != 1 || input < 11 || input > 100 ||
+			input >= dbs_tuners_ins.up_threshold) {
 		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
 	}
@@ -264,12 +320,14 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	}
 	dbs_tuners_ins.ignore_nice = input;
 
-	/* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */
+	/* we need to re-evaluate prev_cpu_idle */
 	for_each_online_cpu(j) {
-		struct cpu_dbs_info_s *j_dbs_info;
-		j_dbs_info = &per_cpu(cpu_dbs_info, j);
-		j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
-		j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
+		struct cpu_dbs_info_s *dbs_info;
+		dbs_info = &per_cpu(cpu_dbs_info, j);
+		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
+						&dbs_info->prev_cpu_wall);
+		if (dbs_tuners_ins.ignore_nice)
+			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
 	}
 	mutex_unlock(&dbs_mutex);
 
@@ -281,7 +339,6 @@ static ssize_t store_freq_step(struct cpufreq_policy *policy,
 {
 	unsigned int input;
 	int ret;
-
 	ret = sscanf(buf, "%u", &input);
 
 	if (ret != 1)
@@ -310,7 +367,7 @@ define_one_rw(down_threshold);
 define_one_rw(ignore_nice_load);
 define_one_rw(freq_step);
 
-static struct attribute * dbs_attributes[] = {
+static struct attribute *dbs_attributes[] = {
 	&sampling_rate_max.attr,
 	&sampling_rate_min.attr,
 	&sampling_rate.attr,
@@ -329,55 +386,78 @@ static struct attribute_group dbs_attr_group = {
 
 /************************** sysfs end ************************/
 
-static void dbs_check_cpu(int cpu)
+static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 {
-	unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
-	unsigned int tmp_idle_ticks, total_idle_ticks;
+	unsigned int load = 0;
 	unsigned int freq_target;
-	unsigned int freq_down_sampling_rate;
-	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
-	struct cpufreq_policy *policy;
 
-	if (!this_dbs_info->enable)
-		return;
+	struct cpufreq_policy *policy;
+	unsigned int j;
 
 	policy = this_dbs_info->cur_policy;
 
 	/*
-	 * The default safe range is 20% to 80%
-	 * Every sampling_rate, we check
-	 *	- If current idle time is less than 20%, then we try to
-	 *	  increase frequency
-	 * Every sampling_rate*sampling_down_factor, we check
-	 *	- If current idle time is more than 80%, then we try to
-	 *	  decrease frequency
+	 * Every sampling_rate, we check, if current idle time is less
+	 * than 20% (default), then we try to increase frequency
+	 * Every sampling_rate*sampling_down_factor, we check, if current
+	 * idle time is more than 80%, then we try to decrease frequency
 	 *
 	 * Any frequency increase takes it to the maximum frequency.
 	 * Frequency reduction happens at minimum steps of
-	 * 5% (default) of max_frequency
+	 * 5% (default) of maximum frequency
 	 */
 
-	/* Check for frequency increase */
-	idle_ticks = UINT_MAX;
+	/* Get Absolute Load */
+	for_each_cpu(j, policy->cpus) {
+		struct cpu_dbs_info_s *j_dbs_info;
+		cputime64_t cur_wall_time, cur_idle_time;
+		unsigned int idle_time, wall_time;
 
-	/* Check for frequency increase */
-	total_idle_ticks = get_cpu_idle_time(cpu);
-	tmp_idle_ticks = total_idle_ticks -
-		this_dbs_info->prev_cpu_idle_up;
-	this_dbs_info->prev_cpu_idle_up = total_idle_ticks;
+		j_dbs_info = &per_cpu(cpu_dbs_info, j);
+
+		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
+
+		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
+				j_dbs_info->prev_cpu_wall);
+		j_dbs_info->prev_cpu_wall = cur_wall_time;
+
+		idle_time = (unsigned int) cputime64_sub(cur_idle_time,
+				j_dbs_info->prev_cpu_idle);
+		j_dbs_info->prev_cpu_idle = cur_idle_time;
+
+		if (dbs_tuners_ins.ignore_nice) {
+			cputime64_t cur_nice;
+			unsigned long cur_nice_jiffies;
+
+			cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
+					j_dbs_info->prev_cpu_nice);
+			/*
+			 * Assumption: nice time between sampling periods will
+			 * be less than 2^32 jiffies for 32 bit sys
+			 */
+			cur_nice_jiffies = (unsigned long)
+					cputime64_to_jiffies64(cur_nice);
 
-	if (tmp_idle_ticks < idle_ticks)
-		idle_ticks = tmp_idle_ticks;
+			j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+			idle_time += jiffies_to_usecs(cur_nice_jiffies);
+		}
+
+		if (unlikely(!wall_time || wall_time < idle_time))
+			continue;
+
+		load = 100 * (wall_time - idle_time) / wall_time;
+	}
 
-	/* Scale idle ticks by 100 and compare with up and down ticks */
-	idle_ticks *= 100;
-	up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
-			usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+	/*
+	 * break out if we 'cannot' reduce the speed as the user might
+	 * want freq_step to be zero
+	 */
+	if (dbs_tuners_ins.freq_step == 0)
+		return;
 
-	if (idle_ticks < up_idle_ticks) {
+	/* Check for frequency increase */
+	if (load > dbs_tuners_ins.up_threshold) {
 		this_dbs_info->down_skip = 0;
-		this_dbs_info->prev_cpu_idle_down =
-			this_dbs_info->prev_cpu_idle_up;
 
 		/* if we are already at full speed then break out early */
 		if (this_dbs_info->requested_freq == policy->max)
@@ -398,49 +478,24 @@ static void dbs_check_cpu(int cpu)
 			return;
 	}
 
-	/* Check for frequency decrease */
-	this_dbs_info->down_skip++;
-	if (this_dbs_info->down_skip < dbs_tuners_ins.sampling_down_factor)
-		return;
-
-	/* Check for frequency decrease */
-	total_idle_ticks = this_dbs_info->prev_cpu_idle_up;
-	tmp_idle_ticks = total_idle_ticks -
-		this_dbs_info->prev_cpu_idle_down;
-	this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
-
-	if (tmp_idle_ticks < idle_ticks)
-		idle_ticks = tmp_idle_ticks;
-
-	/* Scale idle ticks by 100 and compare with up and down ticks */
-	idle_ticks *= 100;
-	this_dbs_info->down_skip = 0;
-
-	freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
-		dbs_tuners_ins.sampling_down_factor;
-	down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
-		usecs_to_jiffies(freq_down_sampling_rate);
-
-	if (idle_ticks > down_idle_ticks) {
-		/*
-		 * if we are already at the lowest speed then break out early
-		 * or if we 'cannot' reduce the speed as the user might want
-		 * freq_target to be zero
-		 */
-		if (this_dbs_info->requested_freq == policy->min
-				|| dbs_tuners_ins.freq_step == 0)
-			return;
-
+	/*
+	 * The optimal frequency is the frequency that is the lowest that
+	 * can support the current CPU usage without triggering the up
+	 * policy. To be safe, we focus 10 points under the threshold.
+	 */
+	if (load < (dbs_tuners_ins.down_threshold - 10)) {
 		freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;
 
-		/* max freq cannot be less than 100. But who knows.... */
-		if (unlikely(freq_target == 0))
-			freq_target = 5;
-
 		this_dbs_info->requested_freq -= freq_target;
 		if (this_dbs_info->requested_freq < policy->min)
 			this_dbs_info->requested_freq = policy->min;
 
+		/*
+		 * if we cannot reduce the frequency anymore, break out early
+		 */
+		if (policy->cur == policy->min)
+			return;
+
 		__cpufreq_driver_target(policy, this_dbs_info->requested_freq,
 				CPUFREQ_RELATION_H);
 		return;
@@ -449,27 +504,45 @@ static void dbs_check_cpu(int cpu)
 
 static void do_dbs_timer(struct work_struct *work)
 {
-	int i;
-	mutex_lock(&dbs_mutex);
-	for_each_online_cpu(i)
-		dbs_check_cpu(i);
-	schedule_delayed_work(&dbs_work,
-			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
-	mutex_unlock(&dbs_mutex);
+	struct cpu_dbs_info_s *dbs_info =
+		container_of(work, struct cpu_dbs_info_s, work.work);
+	unsigned int cpu = dbs_info->cpu;
+
+	/* We want all CPUs to do sampling nearly on same jiffy */
+	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+
+	delay -= jiffies % delay;
+
+	if (lock_policy_rwsem_write(cpu) < 0)
+		return;
+
+	if (!dbs_info->enable) {
+		unlock_policy_rwsem_write(cpu);
+		return;
+	}
+
+	dbs_check_cpu(dbs_info);
+
+	queue_delayed_work_on(cpu, kconservative_wq, &dbs_info->work, delay);
+	unlock_policy_rwsem_write(cpu);
 }
 
-static inline void dbs_timer_init(void)
+static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
 {
-	init_timer_deferrable(&dbs_work.timer);
-	schedule_delayed_work(&dbs_work,
-			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
-	return;
+	/* We want all CPUs to do sampling nearly on same jiffy */
+	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+	delay -= jiffies % delay;
+
+	dbs_info->enable = 1;
+	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
+	queue_delayed_work_on(dbs_info->cpu, kconservative_wq, &dbs_info->work,
+				delay);
 }
 
-static inline void dbs_timer_exit(void)
+static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
 {
-	cancel_delayed_work(&dbs_work);
-	return;
+	dbs_info->enable = 0;
+	cancel_delayed_work(&dbs_info->work);
 }
 
 static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
@@ -503,11 +576,13 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
 
-			j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(cpu);
-			j_dbs_info->prev_cpu_idle_down
-				= j_dbs_info->prev_cpu_idle_up;
+			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
+						&j_dbs_info->prev_cpu_wall);
+			if (dbs_tuners_ins.ignore_nice) {
+				j_dbs_info->prev_cpu_nice =
+						kstat_cpu(j).cpustat.nice;
+			}
 		}
-		this_dbs_info->enable = 1;
 		this_dbs_info->down_skip = 0;
 		this_dbs_info->requested_freq = policy->cur;
 
@@ -523,38 +598,36 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			if (latency == 0)
 				latency = 1;
 
-			def_sampling_rate = 10 * latency *
-				DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
-
-			if (def_sampling_rate < MIN_STAT_SAMPLING_RATE)
-				def_sampling_rate = MIN_STAT_SAMPLING_RATE;
+			def_sampling_rate =
+				max(latency * LATENCY_MULTIPLIER,
+				    MIN_STAT_SAMPLING_RATE);
 
 			dbs_tuners_ins.sampling_rate = def_sampling_rate;
 
-			dbs_timer_init();
 			cpufreq_register_notifier(
 					&dbs_cpufreq_notifier_block,
 					CPUFREQ_TRANSITION_NOTIFIER);
 		}
+		dbs_timer_init(this_dbs_info);
 
 		mutex_unlock(&dbs_mutex);
+
 		break;
 
 	case CPUFREQ_GOV_STOP:
 		mutex_lock(&dbs_mutex);
-		this_dbs_info->enable = 0;
+		dbs_timer_exit(this_dbs_info);
 		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
 		dbs_enable--;
+
 		/*
 		 * Stop the timerschedule work, when this governor
 		 * is used for first time
 		 */
-		if (dbs_enable == 0) {
-			dbs_timer_exit();
+		if (dbs_enable == 0)
 			cpufreq_unregister_notifier(
 					&dbs_cpufreq_notifier_block,
 					CPUFREQ_TRANSITION_NOTIFIER);
-		}
 
 		mutex_unlock(&dbs_mutex);
 
@@ -571,6 +644,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 				this_dbs_info->cur_policy,
 				policy->min, CPUFREQ_RELATION_L);
 		mutex_unlock(&dbs_mutex);
+
 		break;
 	}
 	return 0;
@@ -588,23 +662,33 @@ struct cpufreq_governor cpufreq_gov_conservative = {
 
 static int __init cpufreq_gov_dbs_init(void)
 {
-	return cpufreq_register_governor(&cpufreq_gov_conservative);
+	int err;
+
+	kconservative_wq = create_workqueue("kconservative");
+	if (!kconservative_wq) {
+		printk(KERN_ERR "Creation of kconservative failed\n");
+		return -EFAULT;
+	}
+
+	err = cpufreq_register_governor(&cpufreq_gov_conservative);
+	if (err)
+		destroy_workqueue(kconservative_wq);
+
+	return err;
 }
 
 static void __exit cpufreq_gov_dbs_exit(void)
 {
-	/* Make sure that the scheduled work is indeed not running */
-	flush_scheduled_work();
-
 	cpufreq_unregister_governor(&cpufreq_gov_conservative);
+	destroy_workqueue(kconservative_wq);
 }
 
 
-MODULE_AUTHOR ("Alexander Clouter <alex-kernel@digriz.org.uk>");
-MODULE_DESCRIPTION ("'cpufreq_conservative' - A dynamic cpufreq governor for "
+MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
+MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
 		"Low Latency Frequency Transition capable processors "
 		"optimised for use in a battery environment");
-MODULE_LICENSE ("GPL");
+MODULE_LICENSE("GPL");
 
 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
 fs_initcall(cpufreq_gov_dbs_init);
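
The heart of the rewritten conservative governor above is the switch from idle-tick bookkeeping to wall/idle time deltas. Distilled into a standalone helper — hypothetical, for illustration only; the kernel keeps the deltas in struct cpu_dbs_info_s — the per-sample figure dbs_check_cpu() now derives is:

/*
 * Sketch of the load figure dbs_check_cpu() computes per sample: the
 * percentage of elapsed wall time that was not idle. The guard mirrors
 * the patch, which skips samples with a bogus wall time.
 */
static unsigned int sample_load(unsigned int wall_time, unsigned int idle_time)
{
	if (!wall_time || wall_time < idle_time)
		return 0;	/* the governor simply skips such samples */

	return 100 * (wall_time - idle_time) / wall_time;
}

A load above up_threshold steps the frequency up; a load more than 10 points below down_threshold steps it down by freq_step percent of policy->max, which is why the patch also forbids down_threshold values below 11.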
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 6f45b1658a67..338f428a15b7 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -21,6 +21,7 @@
 #include <linux/hrtimer.h>
 #include <linux/tick.h>
 #include <linux/ktime.h>
+#include <linux/sched.h>
 
 /*
  * dbs is used in this file as a shortform for demandbased switching
@@ -51,8 +52,20 @@ static unsigned int def_sampling_rate;
 		(MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
 #define MIN_SAMPLING_RATE			\
 		(def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
+/* Above MIN_SAMPLING_RATE will vanish with its sysfs file soon
+ * Define the minimal settable sampling rate to the greater of:
+ *  - "HW transition latency" * 100 (same as default sampling / 10)
+ *  - MIN_STAT_SAMPLING_RATE
+ * To avoid that userspace shoots itself.
+*/
+static unsigned int minimum_sampling_rate(void)
+{
+	return max(def_sampling_rate / 10, MIN_STAT_SAMPLING_RATE);
+}
+
+/* This will also vanish soon with removing sampling_rate_max */
 #define MAX_SAMPLING_RATE			(500 * def_sampling_rate)
-#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER	(1000)
+#define LATENCY_MULTIPLIER			(1000)
 #define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)
 
 static void do_dbs_timer(struct work_struct *work);
@@ -65,14 +78,14 @@ struct cpu_dbs_info_s {
65 cputime64_t prev_cpu_wall; 78 cputime64_t prev_cpu_wall;
66 cputime64_t prev_cpu_nice; 79 cputime64_t prev_cpu_nice;
67 struct cpufreq_policy *cur_policy; 80 struct cpufreq_policy *cur_policy;
68 struct delayed_work work; 81 struct delayed_work work;
69 struct cpufreq_frequency_table *freq_table; 82 struct cpufreq_frequency_table *freq_table;
70 unsigned int freq_lo; 83 unsigned int freq_lo;
71 unsigned int freq_lo_jiffies; 84 unsigned int freq_lo_jiffies;
72 unsigned int freq_hi_jiffies; 85 unsigned int freq_hi_jiffies;
73 int cpu; 86 int cpu;
74 unsigned int enable:1, 87 unsigned int enable:1,
75 sample_type:1; 88 sample_type:1;
76}; 89};
77static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info); 90static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
78 91
@@ -203,12 +216,28 @@ static void ondemand_powersave_bias_init(void)
 /************************** sysfs interface ************************/
 static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
 {
-	return sprintf (buf, "%u\n", MAX_SAMPLING_RATE);
+	static int print_once;
+
+	if (!print_once) {
+		printk(KERN_INFO "CPUFREQ: ondemand sampling_rate_max "
+			"sysfs file is deprecated - used by: %s\n",
+			current->comm);
+		print_once = 1;
+	}
+	return sprintf(buf, "%u\n", MAX_SAMPLING_RATE);
 }
 
 static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
 {
-	return sprintf (buf, "%u\n", MIN_SAMPLING_RATE);
+	static int print_once;
+
+	if (!print_once) {
+		printk(KERN_INFO "CPUFREQ: ondemand sampling_rate_min "
+			"sysfs file is deprecated - used by: %s\n",
+			current->comm);
+		print_once = 1;
+	}
+	return sprintf(buf, "%u\n", MIN_SAMPLING_RATE);
 }
 
 #define define_one_ro(_name)		\
@@ -238,13 +267,11 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
 	ret = sscanf(buf, "%u", &input);
 
 	mutex_lock(&dbs_mutex);
-	if (ret != 1 || input > MAX_SAMPLING_RATE
-			|| input < MIN_SAMPLING_RATE) {
+	if (ret != 1) {
 		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
 	}
-
-	dbs_tuners_ins.sampling_rate = input;
+	dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
 	mutex_unlock(&dbs_mutex);
 
 	return count;
@@ -279,14 +306,14 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	unsigned int j;
 
 	ret = sscanf(buf, "%u", &input);
-	if ( ret != 1 )
+	if (ret != 1)
 		return -EINVAL;
 
-	if ( input > 1 )
+	if (input > 1)
 		input = 1;
 
 	mutex_lock(&dbs_mutex);
-	if ( input == dbs_tuners_ins.ignore_nice ) { /* nothing to do */
+	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
 		mutex_unlock(&dbs_mutex);
 		return count;
 	}
@@ -337,7 +364,7 @@ define_one_rw(up_threshold);
 define_one_rw(ignore_nice_load);
 define_one_rw(powersave_bias);
 
-static struct attribute * dbs_attributes[] = {
+static struct attribute *dbs_attributes[] = {
 	&sampling_rate_max.attr,
 	&sampling_rate_min.attr,
 	&sampling_rate.attr,
@@ -512,8 +539,7 @@ static void do_dbs_timer(struct work_struct *work)
 		}
 	} else {
 		__cpufreq_driver_target(dbs_info->cur_policy,
-			dbs_info->freq_lo,
-			CPUFREQ_RELATION_H);
+			dbs_info->freq_lo, CPUFREQ_RELATION_H);
 	}
 	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
 	unlock_policy_rwsem_write(cpu);
@@ -530,7 +556,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
 	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
 	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
 	queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
-	                      delay);
+		delay);
 }
 
 static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
@@ -591,11 +617,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		if (latency == 0)
 			latency = 1;
 
-		def_sampling_rate = latency *
-			DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
-
-		if (def_sampling_rate < MIN_STAT_SAMPLING_RATE)
-			def_sampling_rate = MIN_STAT_SAMPLING_RATE;
+		def_sampling_rate =
+			max(latency * LATENCY_MULTIPLIER,
+			    MIN_STAT_SAMPLING_RATE);
 
 		dbs_tuners_ins.sampling_rate = def_sampling_rate;
 	}
@@ -617,12 +641,10 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		mutex_lock(&dbs_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(this_dbs_info->cur_policy,
-				policy->max,
-				CPUFREQ_RELATION_H);
+				policy->max, CPUFREQ_RELATION_H);
 		else if (policy->min > this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(this_dbs_info->cur_policy,
-				policy->min,
-				CPUFREQ_RELATION_L);
+				policy->min, CPUFREQ_RELATION_L);
 		mutex_unlock(&dbs_mutex);
 		break;
 	}
@@ -677,7 +699,7 @@ static void __exit cpufreq_gov_dbs_exit(void)
 MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
 MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
 MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
-                   "Low Latency Frequency Transition capable processors");
+	"Low Latency Frequency Transition capable processors");
 MODULE_LICENSE("GPL");
 
 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
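
Both governors now share the sampling_rate policy visible in the two store_sampling_rate() hunks: an out-of-range write is clamped to a safe floor instead of being rejected with -EINVAL. As a standalone sketch — illustrative only; in the kernel this is minimum_sampling_rate() combined with the max() in the store hook — the floor works out to:

/*
 * Illustrative reimplementation of the clamp applied when userspace
 * writes sampling_rate: values below the safe minimum are silently
 * raised to it, where the minimum is the greater of def_sampling_rate/10
 * (i.e. hardware transition latency * 100) and MIN_STAT_SAMPLING_RATE.
 */
static unsigned int clamped_sampling_rate(unsigned int requested,
					  unsigned int def_sampling_rate,
					  unsigned int min_stat_sampling_rate)
{
	unsigned int minimum = def_sampling_rate / 10;

	if (minimum < min_stat_sampling_rate)
		minimum = min_stat_sampling_rate;

	return requested > minimum ? requested : minimum;
}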
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index c0ff97d375d7..5a62d678dd19 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -2,7 +2,7 @@
2 * drivers/cpufreq/cpufreq_stats.c 2 * drivers/cpufreq/cpufreq_stats.c
3 * 3 *
4 * Copyright (C) 2003-2004 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>. 4 * Copyright (C) 2003-2004 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
5 * (C) 2004 Zou Nan hai <nanhai.zou@intel.com>. 5 * (C) 2004 Zou Nan hai <nanhai.zou@intel.com>.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
@@ -23,7 +23,7 @@
23 23
24static spinlock_t cpufreq_stats_lock; 24static spinlock_t cpufreq_stats_lock;
25 25
26#define CPUFREQ_STATDEVICE_ATTR(_name,_mode,_show) \ 26#define CPUFREQ_STATDEVICE_ATTR(_name, _mode, _show) \
27static struct freq_attr _attr_##_name = {\ 27static struct freq_attr _attr_##_name = {\
28 .attr = {.name = __stringify(_name), .mode = _mode, }, \ 28 .attr = {.name = __stringify(_name), .mode = _mode, }, \
29 .show = _show,\ 29 .show = _show,\
@@ -50,8 +50,7 @@ struct cpufreq_stats_attribute {
50 ssize_t(*show) (struct cpufreq_stats *, char *); 50 ssize_t(*show) (struct cpufreq_stats *, char *);
51}; 51};
52 52
53static int 53static int cpufreq_stats_update(unsigned int cpu)
54cpufreq_stats_update (unsigned int cpu)
55{ 54{
56 struct cpufreq_stats *stat; 55 struct cpufreq_stats *stat;
57 unsigned long long cur_time; 56 unsigned long long cur_time;
@@ -68,8 +67,7 @@ cpufreq_stats_update (unsigned int cpu)
68 return 0; 67 return 0;
69} 68}
70 69
71static ssize_t 70static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
72show_total_trans(struct cpufreq_policy *policy, char *buf)
73{ 71{
74 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu); 72 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
75 if (!stat) 73 if (!stat)
@@ -78,8 +76,7 @@ show_total_trans(struct cpufreq_policy *policy, char *buf)
78 per_cpu(cpufreq_stats_table, stat->cpu)->total_trans); 76 per_cpu(cpufreq_stats_table, stat->cpu)->total_trans);
79} 77}
80 78
81static ssize_t 79static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
82show_time_in_state(struct cpufreq_policy *policy, char *buf)
83{ 80{
84 ssize_t len = 0; 81 ssize_t len = 0;
85 int i; 82 int i;
@@ -89,14 +86,14 @@ show_time_in_state(struct cpufreq_policy *policy, char *buf)
89 cpufreq_stats_update(stat->cpu); 86 cpufreq_stats_update(stat->cpu);
90 for (i = 0; i < stat->state_num; i++) { 87 for (i = 0; i < stat->state_num; i++) {
91 len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i], 88 len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i],
92 (unsigned long long)cputime64_to_clock_t(stat->time_in_state[i])); 89 (unsigned long long)
90 cputime64_to_clock_t(stat->time_in_state[i]));
93 } 91 }
94 return len; 92 return len;
95} 93}
96 94
97#ifdef CONFIG_CPU_FREQ_STAT_DETAILS 95#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
98static ssize_t 96static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
99show_trans_table(struct cpufreq_policy *policy, char *buf)
100{ 97{
101 ssize_t len = 0; 98 ssize_t len = 0;
102 int i, j; 99 int i, j;
@@ -139,11 +136,11 @@ show_trans_table(struct cpufreq_policy *policy, char *buf)
139 return PAGE_SIZE; 136 return PAGE_SIZE;
140 return len; 137 return len;
141} 138}
142CPUFREQ_STATDEVICE_ATTR(trans_table,0444,show_trans_table); 139CPUFREQ_STATDEVICE_ATTR(trans_table, 0444, show_trans_table);
143#endif 140#endif
144 141
145CPUFREQ_STATDEVICE_ATTR(total_trans,0444,show_total_trans); 142CPUFREQ_STATDEVICE_ATTR(total_trans, 0444, show_total_trans);
146CPUFREQ_STATDEVICE_ATTR(time_in_state,0444,show_time_in_state); 143CPUFREQ_STATDEVICE_ATTR(time_in_state, 0444, show_time_in_state);
147 144
148static struct attribute *default_attrs[] = { 145static struct attribute *default_attrs[] = {
149 &_attr_total_trans.attr, 146 &_attr_total_trans.attr,
@@ -158,8 +155,7 @@ static struct attribute_group stats_attr_group = {
158 .name = "stats" 155 .name = "stats"
159}; 156};
160 157
161static int 158static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
162freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
163{ 159{
164 int index; 160 int index;
165 for (index = 0; index < stat->max_state; index++) 161 for (index = 0; index < stat->max_state; index++)
@@ -183,8 +179,7 @@ static void cpufreq_stats_free_table(unsigned int cpu)
183 cpufreq_cpu_put(policy); 179 cpufreq_cpu_put(policy);
184} 180}
185 181
186static int 182static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
187cpufreq_stats_create_table (struct cpufreq_policy *policy,
188 struct cpufreq_frequency_table *table) 183 struct cpufreq_frequency_table *table)
189{ 184{
190 unsigned int i, j, count = 0, ret = 0; 185 unsigned int i, j, count = 0, ret = 0;
@@ -194,7 +189,8 @@ cpufreq_stats_create_table (struct cpufreq_policy *policy,
194 unsigned int cpu = policy->cpu; 189 unsigned int cpu = policy->cpu;
195 if (per_cpu(cpufreq_stats_table, cpu)) 190 if (per_cpu(cpufreq_stats_table, cpu))
196 return -EBUSY; 191 return -EBUSY;
197 if ((stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL)) == NULL) 192 stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL);
193 if ((stat) == NULL)
198 return -ENOMEM; 194 return -ENOMEM;
199 195
200 data = cpufreq_cpu_get(cpu); 196 data = cpufreq_cpu_get(cpu);
@@ -203,13 +199,14 @@ cpufreq_stats_create_table (struct cpufreq_policy *policy,
203 goto error_get_fail; 199 goto error_get_fail;
204 } 200 }
205 201
206 if ((ret = sysfs_create_group(&data->kobj, &stats_attr_group))) 202 ret = sysfs_create_group(&data->kobj, &stats_attr_group);
203 if (ret)
207 goto error_out; 204 goto error_out;
208 205
209 stat->cpu = cpu; 206 stat->cpu = cpu;
210 per_cpu(cpufreq_stats_table, cpu) = stat; 207 per_cpu(cpufreq_stats_table, cpu) = stat;
211 208
212 for (i=0; table[i].frequency != CPUFREQ_TABLE_END; i++) { 209 for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
213 unsigned int freq = table[i].frequency; 210 unsigned int freq = table[i].frequency;
214 if (freq == CPUFREQ_ENTRY_INVALID) 211 if (freq == CPUFREQ_ENTRY_INVALID)
215 continue; 212 continue;
@@ -255,9 +252,8 @@ error_get_fail:
255 return ret; 252 return ret;
256} 253}
257 254
258static int 255static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
259cpufreq_stat_notifier_policy (struct notifier_block *nb, unsigned long val, 256 unsigned long val, void *data)
260 void *data)
261{ 257{
262 int ret; 258 int ret;
263 struct cpufreq_policy *policy = data; 259 struct cpufreq_policy *policy = data;
@@ -268,14 +264,14 @@ cpufreq_stat_notifier_policy (struct notifier_block *nb, unsigned long val,
268 table = cpufreq_frequency_get_table(cpu); 264 table = cpufreq_frequency_get_table(cpu);
269 if (!table) 265 if (!table)
270 return 0; 266 return 0;
271 if ((ret = cpufreq_stats_create_table(policy, table))) 267 ret = cpufreq_stats_create_table(policy, table);
268 if (ret)
272 return ret; 269 return ret;
273 return 0; 270 return 0;
274} 271}
275 272
276static int 273static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
277cpufreq_stat_notifier_trans (struct notifier_block *nb, unsigned long val, 274 unsigned long val, void *data)
278 void *data)
279{ 275{
280 struct cpufreq_freqs *freq = data; 276 struct cpufreq_freqs *freq = data;
281 struct cpufreq_stats *stat; 277 struct cpufreq_stats *stat;
@@ -340,19 +336,20 @@ static struct notifier_block notifier_trans_block = {
340 .notifier_call = cpufreq_stat_notifier_trans 336 .notifier_call = cpufreq_stat_notifier_trans
341}; 337};
342 338
343static int 339static int __init cpufreq_stats_init(void)
344__init cpufreq_stats_init(void)
345{ 340{
346 int ret; 341 int ret;
347 unsigned int cpu; 342 unsigned int cpu;
348 343
349 spin_lock_init(&cpufreq_stats_lock); 344 spin_lock_init(&cpufreq_stats_lock);
350 if ((ret = cpufreq_register_notifier(&notifier_policy_block, 345 ret = cpufreq_register_notifier(&notifier_policy_block,
351 CPUFREQ_POLICY_NOTIFIER))) 346 CPUFREQ_POLICY_NOTIFIER);
347 if (ret)
352 return ret; 348 return ret;
353 349
354 if ((ret = cpufreq_register_notifier(&notifier_trans_block, 350 ret = cpufreq_register_notifier(&notifier_trans_block,
355 CPUFREQ_TRANSITION_NOTIFIER))) { 351 CPUFREQ_TRANSITION_NOTIFIER);
352 if (ret) {
356 cpufreq_unregister_notifier(&notifier_policy_block, 353 cpufreq_unregister_notifier(&notifier_policy_block,
357 CPUFREQ_POLICY_NOTIFIER); 354 CPUFREQ_POLICY_NOTIFIER);
358 return ret; 355 return ret;
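[Annotation] The init hunk above keeps its unwind order while being de-fused: cpufreq_stats registers two notifiers, and if the second registration fails the first must be torn down before returning. A self-contained sketch of just that skeleton (the real cpufreq_stats_init() also walks online CPUs to create the per-CPU tables, omitted here):

    static int __init stats_init_sketch(void)
    {
            int ret;

            ret = cpufreq_register_notifier(&notifier_policy_block,
                            CPUFREQ_POLICY_NOTIFIER);
            if (ret)
                    return ret;

            ret = cpufreq_register_notifier(&notifier_trans_block,
                            CPUFREQ_TRANSITION_NOTIFIER);
            if (ret) {
                    /* Unwind in reverse: drop the notifier that did register. */
                    cpufreq_unregister_notifier(&notifier_policy_block,
                                    CPUFREQ_POLICY_NOTIFIER);
                    return ret;
            }
            return 0;
    }

The same pattern generalizes: register resources in order, release the already-acquired ones in reverse on the first failure.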
@@ -364,8 +361,7 @@ __init cpufreq_stats_init(void)
364 } 361 }
365 return 0; 362 return 0;
366} 363}
367static void 364static void __exit cpufreq_stats_exit(void)
368__exit cpufreq_stats_exit(void)
369{ 365{
370 unsigned int cpu; 366 unsigned int cpu;
371 367
@@ -379,10 +375,10 @@ __exit cpufreq_stats_exit(void)
379 } 375 }
380} 376}
381 377
382MODULE_AUTHOR ("Zou Nan hai <nanhai.zou@intel.com>"); 378MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>");
383MODULE_DESCRIPTION ("'cpufreq_stats' - A driver to export cpufreq stats " 379MODULE_DESCRIPTION("'cpufreq_stats' - A driver to export cpufreq stats "
384 "through sysfs filesystem"); 380 "through sysfs filesystem");
385MODULE_LICENSE ("GPL"); 381MODULE_LICENSE("GPL");
386 382
387module_init(cpufreq_stats_init); 383module_init(cpufreq_stats_init);
388module_exit(cpufreq_stats_exit); 384module_exit(cpufreq_stats_exit);
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 1442bbada053..66d2d1d6c80f 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -24,9 +24,6 @@
24#include <linux/sysfs.h> 24#include <linux/sysfs.h>
25#include <linux/mutex.h> 25#include <linux/mutex.h>
26 26
27#include <asm/uaccess.h>
28
29
30/** 27/**
31 * A few values needed by the userspace governor 28 * A few values needed by the userspace governor
32 */ 29 */
@@ -37,7 +34,7 @@ static DEFINE_PER_CPU(unsigned int, cpu_set_freq); /* CPU freq desired by
37 userspace */ 34 userspace */
38static DEFINE_PER_CPU(unsigned int, cpu_is_managed); 35static DEFINE_PER_CPU(unsigned int, cpu_is_managed);
39 36
40static DEFINE_MUTEX (userspace_mutex); 37static DEFINE_MUTEX(userspace_mutex);
41static int cpus_using_userspace_governor; 38static int cpus_using_userspace_governor;
42 39
43#define dprintk(msg...) \ 40#define dprintk(msg...) \
@@ -46,9 +43,9 @@ static int cpus_using_userspace_governor;
46/* keep track of frequency transitions */ 43/* keep track of frequency transitions */
47static int 44static int
48userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val, 45userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
49 void *data) 46 void *data)
50{ 47{
51 struct cpufreq_freqs *freq = data; 48 struct cpufreq_freqs *freq = data;
52 49
53 if (!per_cpu(cpu_is_managed, freq->cpu)) 50 if (!per_cpu(cpu_is_managed, freq->cpu))
54 return 0; 51 return 0;
@@ -57,11 +54,11 @@ userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
57 freq->cpu, freq->new); 54 freq->cpu, freq->new);
58 per_cpu(cpu_cur_freq, freq->cpu) = freq->new; 55 per_cpu(cpu_cur_freq, freq->cpu) = freq->new;
59 56
60 return 0; 57 return 0;
61} 58}
62 59
63static struct notifier_block userspace_cpufreq_notifier_block = { 60static struct notifier_block userspace_cpufreq_notifier_block = {
64 .notifier_call = userspace_cpufreq_notifier 61 .notifier_call = userspace_cpufreq_notifier
65}; 62};
66 63
67 64
@@ -93,8 +90,11 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
93 * We're safe from concurrent calls to ->target() here 90 * We're safe from concurrent calls to ->target() here
94 * as we hold the userspace_mutex lock. If we were calling 91 * as we hold the userspace_mutex lock. If we were calling
95 * cpufreq_driver_target, a deadlock situation might occur: 92 * cpufreq_driver_target, a deadlock situation might occur:
96 * A: cpufreq_set (lock userspace_mutex) -> cpufreq_driver_target(lock policy->lock) 93 * A: cpufreq_set (lock userspace_mutex) ->
97 * B: cpufreq_set_policy(lock policy->lock) -> __cpufreq_governor -> cpufreq_governor_userspace (lock userspace_mutex) 94 * cpufreq_driver_target(lock policy->lock)
95 * B: cpufreq_set_policy(lock policy->lock) ->
96 * __cpufreq_governor ->
97 * cpufreq_governor_userspace (lock userspace_mutex)
98 */ 98 */
99 ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L); 99 ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
100 100
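[Annotation] The comment re-wrapped in this hunk describes a classic AB-BA lock inversion; schematically (a sketch of the scenario in the comment, not code from the driver):

    /*
     * Thread A: cpufreq_set()
     *     lock(userspace_mutex)            <- A holds the mutex
     *     cpufreq_driver_target()
     *         lock(policy->lock)           <- blocks, B holds it
     *
     * Thread B: cpufreq_set_policy()
     *     lock(policy->lock)               <- B holds the lock
     *     __cpufreq_governor()
     *         cpufreq_governor_userspace()
     *             lock(userspace_mutex)    <- blocks, A holds it
     */

Calling __cpufreq_driver_target() directly, as the line below the comment does, avoids taking policy->lock a second time and so breaks the cycle.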
@@ -210,9 +210,10 @@ static void __exit cpufreq_gov_userspace_exit(void)
210} 210}
211 211
212 212
213MODULE_AUTHOR ("Dominik Brodowski <linux@brodo.de>, Russell King <rmk@arm.linux.org.uk>"); 213MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>, "
214MODULE_DESCRIPTION ("CPUfreq policy governor 'userspace'"); 214 "Russell King <rmk@arm.linux.org.uk>");
215MODULE_LICENSE ("GPL"); 215MODULE_DESCRIPTION("CPUfreq policy governor 'userspace'");
216MODULE_LICENSE("GPL");
216 217
217#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE 218#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE
218fs_initcall(cpufreq_gov_userspace_init); 219fs_initcall(cpufreq_gov_userspace_init);
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 9071d80fbba2..a9bd3a05a684 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -28,7 +28,7 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
28 unsigned int max_freq = 0; 28 unsigned int max_freq = 0;
29 unsigned int i; 29 unsigned int i;
30 30
31 for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { 31 for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
32 unsigned int freq = table[i].frequency; 32 unsigned int freq = table[i].frequency;
33 if (freq == CPUFREQ_ENTRY_INVALID) { 33 if (freq == CPUFREQ_ENTRY_INVALID) {
34 dprintk("table entry %u is invalid, skipping\n", i); 34 dprintk("table entry %u is invalid, skipping\n", i);
@@ -70,7 +70,7 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
70 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, 70 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
71 policy->cpuinfo.max_freq); 71 policy->cpuinfo.max_freq);
72 72
73 for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { 73 for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
74 unsigned int freq = table[i].frequency; 74 unsigned int freq = table[i].frequency;
75 if (freq == CPUFREQ_ENTRY_INVALID) 75 if (freq == CPUFREQ_ENTRY_INVALID)
76 continue; 76 continue;
@@ -125,13 +125,13 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
125 if (!cpu_online(policy->cpu)) 125 if (!cpu_online(policy->cpu))
126 return -EINVAL; 126 return -EINVAL;
127 127
128 for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { 128 for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
129 unsigned int freq = table[i].frequency; 129 unsigned int freq = table[i].frequency;
130 if (freq == CPUFREQ_ENTRY_INVALID) 130 if (freq == CPUFREQ_ENTRY_INVALID)
131 continue; 131 continue;
132 if ((freq < policy->min) || (freq > policy->max)) 132 if ((freq < policy->min) || (freq > policy->max))
133 continue; 133 continue;
134 switch(relation) { 134 switch (relation) {
135 case CPUFREQ_RELATION_H: 135 case CPUFREQ_RELATION_H:
136 if (freq <= target_freq) { 136 if (freq <= target_freq) {
137 if (freq >= optimal.frequency) { 137 if (freq >= optimal.frequency) {
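[Annotation] For readers unfamiliar with the relation flags this hunk touches: CPUFREQ_RELATION_H asks for the highest table frequency at or below the target, CPUFREQ_RELATION_L for the lowest at or above it. A simplified sketch of the _H case, keeping the invalid-entry and policy-bounds skips shown in the diff (the in-tree helper additionally tracks a "suboptimal" fallback for when no entry satisfies the relation, which this sketch omits):

    #include <linux/cpufreq.h>
    #include <linux/errno.h>

    /* Sketch: pick the highest valid frequency <= target_freq. */
    static int pick_relation_h(struct cpufreq_policy *policy,
                               struct cpufreq_frequency_table *table,
                               unsigned int target_freq, unsigned int *chosen)
    {
            unsigned int i, best = 0;
            int found = 0;

            for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
                    unsigned int freq = table[i].frequency;

                    if (freq == CPUFREQ_ENTRY_INVALID)
                            continue;
                    if ((freq < policy->min) || (freq > policy->max))
                            continue;
                    if (freq <= target_freq && (!found || freq > best)) {
                            best = freq;
                            found = 1;
                    }
            }
            if (!found)
                    return -EINVAL;
            *chosen = best;
            return 0;
    }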
@@ -178,7 +178,7 @@ static DEFINE_PER_CPU(struct cpufreq_frequency_table *, show_table);
178/** 178/**
179 * show_available_freqs - show available frequencies for the specified CPU 179 * show_available_freqs - show available frequencies for the specified CPU
180 */ 180 */
181static ssize_t show_available_freqs (struct cpufreq_policy *policy, char *buf) 181static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf)
182{ 182{
183 unsigned int i = 0; 183 unsigned int i = 0;
184 unsigned int cpu = policy->cpu; 184 unsigned int cpu = policy->cpu;
@@ -190,7 +190,7 @@ static ssize_t show_available_freqs (struct cpufreq_policy *policy, char *buf)
190 190
191 table = per_cpu(show_table, cpu); 191 table = per_cpu(show_table, cpu);
192 192
193 for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { 193 for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
194 if (table[i].frequency == CPUFREQ_ENTRY_INVALID) 194 if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
195 continue; 195 continue;
196 count += sprintf(&buf[count], "%d ", table[i].frequency); 196 count += sprintf(&buf[count], "%d ", table[i].frequency);
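[Annotation] The reindented loop above is essentially the whole of the scaling_available_frequencies show routine: every valid table entry is printed space-separated into the sysfs buffer. A sketch of the body, assuming (as in the source of this era) a trailing newline is appended after the loop:

    /* Sketch of the show_available_freqs() body after the per-CPU
     * table pointer has been fetched. */
    ssize_t count = 0;
    unsigned int i;

    for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
            if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
                    continue;
            count += sprintf(&buf[count], "%d ", table[i].frequency);
    }
    count += sprintf(&buf[count], "\n");
    return count;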
@@ -234,6 +234,6 @@ struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
234} 234}
235EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table); 235EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
236 236
237MODULE_AUTHOR ("Dominik Brodowski <linux@brodo.de>"); 237MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
238MODULE_DESCRIPTION ("CPUfreq frequency table helpers"); 238MODULE_DESCRIPTION("CPUfreq frequency table helpers");
239MODULE_LICENSE ("GPL"); 239MODULE_LICENSE("GPL");