author     Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2016-06-13 17:33:17 -0400
committer  Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2016-06-13 17:33:17 -0400
commit     bb4b9933e2bc0554cf4db37aa07b19ff69a85f8f
tree       92ec230e0292874d6a53ff33534e8637fa315479
parent     5edb56491d4812c42175980759da53388e5d86f5
parent     f6709b8aa78fb6765c443ad6b70fdaf48b89d95d
Merge back earlier cpufreq changes for v4.8.
-rw-r--r--  Documentation/cpu-freq/core.txt                  |   4
-rw-r--r--  Documentation/cpu-freq/cpu-drivers.txt           |  10
-rw-r--r--  arch/powerpc/platforms/cell/cpufreq_spudemand.c  |  72
-rw-r--r--  drivers/cpufreq/Kconfig                          |  13
-rw-r--r--  drivers/cpufreq/amd_freq_sensitivity.c           |  10
-rw-r--r--  drivers/cpufreq/cpufreq.c                        | 186
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c           |  51
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c               |  73
-rw-r--r--  drivers/cpufreq/cpufreq_governor.h               |  24
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c               |  40
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.h               |   1
-rw-r--r--  drivers/cpufreq/cpufreq_performance.c            |  19
-rw-r--r--  drivers/cpufreq/cpufreq_powersave.c              |  19
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c                  | 157
-rw-r--r--  drivers/cpufreq/cpufreq_userspace.c              | 104
-rw-r--r--  drivers/cpufreq/davinci-cpufreq.c                |  22
-rw-r--r--  drivers/cpufreq/freq_table.c                     |  37
-rw-r--r--  drivers/cpufreq/powernv-cpufreq.c                |   5
-rw-r--r--  drivers/cpufreq/ppc_cbe_cpufreq_pmi.c            |   3
-rw-r--r--  drivers/cpufreq/s3c24xx-cpufreq.c                |  33
-rw-r--r--  drivers/cpufreq/s5pv210-cpufreq.c                |   8
-rw-r--r--  drivers/thermal/cpu_cooling.c                    |  24
-rw-r--r--  include/linux/cpufreq.h                          |  41
-rw-r--r--  kernel/sched/cpufreq_schedutil.c                 |  43
24 files changed, 403 insertions(+), 596 deletions(-)
diff --git a/Documentation/cpu-freq/core.txt b/Documentation/cpu-freq/core.txt
index ba78e7c2a069..4bc7287806de 100644
--- a/Documentation/cpu-freq/core.txt
+++ b/Documentation/cpu-freq/core.txt
@@ -96,7 +96,7 @@ new - new frequency
 For details about OPP, see Documentation/power/opp.txt
 
 dev_pm_opp_init_cpufreq_table - cpufreq framework typically is initialized with
-	cpufreq_frequency_table_cpuinfo which is provided with the list of
+	cpufreq_table_validate_and_show() which is provided with the list of
 	frequencies that are available for operation. This function provides
 	a ready to use conversion routine to translate the OPP layer's internal
 	information about the available frequencies into a format readily
@@ -110,7 +110,7 @@ dev_pm_opp_init_cpufreq_table - cpufreq framework typically is initialized with
 		/* Do things */
 		r = dev_pm_opp_init_cpufreq_table(dev, &freq_table);
 		if (!r)
-			cpufreq_frequency_table_cpuinfo(policy, freq_table);
+			cpufreq_table_validate_and_show(policy, freq_table);
 		/* Do other things */
 	}
 
diff --git a/Documentation/cpu-freq/cpu-drivers.txt b/Documentation/cpu-freq/cpu-drivers.txt
index 14f4e6336d88..772b94fde264 100644
--- a/Documentation/cpu-freq/cpu-drivers.txt
+++ b/Documentation/cpu-freq/cpu-drivers.txt
@@ -231,7 +231,7 @@ if you want to skip one entry in the table, set the frequency to
 CPUFREQ_ENTRY_INVALID. The entries don't need to be in ascending
 order.
 
-By calling cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
+By calling cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
 					struct cpufreq_frequency_table *table);
 the cpuinfo.min_freq and cpuinfo.max_freq values are detected, and
 policy->min and policy->max are set to the same values. This is
@@ -244,14 +244,12 @@ policy->max, and all other criteria are met. This is helpful for the
 ->verify call.
 
 int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
-				   struct cpufreq_frequency_table *table,
 				   unsigned int target_freq,
-				   unsigned int relation,
-				   unsigned int *index);
+				   unsigned int relation);
 
 is the corresponding frequency table helper for the ->target
-stage. Just pass the values to this function, and the unsigned int
-index returns the number of the frequency table entry which contains
+stage. Just pass the values to this function, and this function
+returns the number of the frequency table entry which contains
 the frequency the CPU shall be set to.
 
 The following macros can be used as iterators over cpufreq_frequency_table:
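
With the reworked helper documented above, a driver-side ->target() implementation no longer passes a table pointer or an output index argument; the helper looks up policy->freq_table itself and returns the chosen index. A minimal sketch under that assumption (the foo_* identifiers are illustrative only, not part of this patch):

	#include <linux/cpufreq.h>

	/* Hypothetical hardware hook, standing in for a real driver's clock code. */
	static int foo_set_rate(struct cpufreq_policy *policy, unsigned int khz);

	static int foo_target(struct cpufreq_policy *policy,
			      unsigned int target_freq, unsigned int relation)
	{
		/* Helper now reads policy->freq_table and returns the index directly. */
		int index = cpufreq_frequency_table_target(policy, target_freq, relation);

		return foo_set_rate(policy, policy->freq_table[index].frequency);
	}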
diff --git a/arch/powerpc/platforms/cell/cpufreq_spudemand.c b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
index 82607d621aca..88301e53f085 100644
--- a/arch/powerpc/platforms/cell/cpufreq_spudemand.c
+++ b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
@@ -85,61 +85,57 @@ static void spu_gov_cancel_work(struct spu_gov_info_struct *info)
85 cancel_delayed_work_sync(&info->work); 85 cancel_delayed_work_sync(&info->work);
86} 86}
87 87
88static int spu_gov_govern(struct cpufreq_policy *policy, unsigned int event) 88static int spu_gov_start(struct cpufreq_policy *policy)
89{ 89{
90 unsigned int cpu = policy->cpu; 90 unsigned int cpu = policy->cpu;
91 struct spu_gov_info_struct *info, *affected_info; 91 struct spu_gov_info_struct *info = &per_cpu(spu_gov_info, cpu);
92 struct spu_gov_info_struct *affected_info;
92 int i; 93 int i;
93 int ret = 0;
94 94
95 info = &per_cpu(spu_gov_info, cpu); 95 if (!cpu_online(cpu)) {
96 96 printk(KERN_ERR "cpu %d is not online\n", cpu);
97 switch (event) { 97 return -EINVAL;
98 case CPUFREQ_GOV_START: 98 }
99 if (!cpu_online(cpu)) {
100 printk(KERN_ERR "cpu %d is not online\n", cpu);
101 ret = -EINVAL;
102 break;
103 }
104 99
105 if (!policy->cur) { 100 if (!policy->cur) {
106 printk(KERN_ERR "no cpu specified in policy\n"); 101 printk(KERN_ERR "no cpu specified in policy\n");
107 ret = -EINVAL; 102 return -EINVAL;
108 break; 103 }
109 }
110 104
111 /* initialize spu_gov_info for all affected cpus */ 105 /* initialize spu_gov_info for all affected cpus */
112 for_each_cpu(i, policy->cpus) { 106 for_each_cpu(i, policy->cpus) {
113 affected_info = &per_cpu(spu_gov_info, i); 107 affected_info = &per_cpu(spu_gov_info, i);
114 affected_info->policy = policy; 108 affected_info->policy = policy;
115 } 109 }
116 110
117 info->poll_int = POLL_TIME; 111 info->poll_int = POLL_TIME;
118 112
119 /* setup timer */ 113 /* setup timer */
120 spu_gov_init_work(info); 114 spu_gov_init_work(info);
121 115
122 break; 116 return 0;
117}
123 118
124 case CPUFREQ_GOV_STOP: 119static void spu_gov_stop(struct cpufreq_policy *policy)
125 /* cancel timer */ 120{
126 spu_gov_cancel_work(info); 121 unsigned int cpu = policy->cpu;
122 struct spu_gov_info_struct *info = &per_cpu(spu_gov_info, cpu);
123 int i;
127 124
128 /* clean spu_gov_info for all affected cpus */ 125 /* cancel timer */
129 for_each_cpu (i, policy->cpus) { 126 spu_gov_cancel_work(info);
130 info = &per_cpu(spu_gov_info, i);
131 info->policy = NULL;
132 }
133 127
134 break; 128 /* clean spu_gov_info for all affected cpus */
129 for_each_cpu (i, policy->cpus) {
130 info = &per_cpu(spu_gov_info, i);
131 info->policy = NULL;
135 } 132 }
136
137 return ret;
138} 133}
139 134
140static struct cpufreq_governor spu_governor = { 135static struct cpufreq_governor spu_governor = {
141 .name = "spudemand", 136 .name = "spudemand",
142 .governor = spu_gov_govern, 137 .start = spu_gov_start,
138 .stop = spu_gov_stop,
143 .owner = THIS_MODULE, 139 .owner = THIS_MODULE,
144}; 140};
145 141
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index b7445b6ae5a4..c822d72629d5 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -31,23 +31,18 @@ config CPU_FREQ_BOOST_SW
 	depends on THERMAL
 
 config CPU_FREQ_STAT
-	tristate "CPU frequency translation statistics"
+	bool "CPU frequency transition statistics"
 	default y
 	help
-	  This driver exports CPU frequency statistics information through sysfs
-	  file system.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called cpufreq_stats.
+	  Export CPU frequency statistics information through sysfs.
 
 	  If in doubt, say N.
 
 config CPU_FREQ_STAT_DETAILS
-	bool "CPU frequency translation statistics details"
+	bool "CPU frequency transition statistics details"
 	depends on CPU_FREQ_STAT
 	help
-	  This will show detail CPU frequency translation table in sysfs file
-	  system.
+	  Show detailed CPU frequency transition table in sysfs.
 
 	  If in doubt, say N.
 
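
Because the statistics code is now driven directly by the cpufreq core (see the cpufreq.c and cpufreq_stats.c hunks below), it can no longer be built as a module, hence the tristate-to-bool change. For illustration, a configuration that previously used CONFIG_CPU_FREQ_STAT=m would now simply carry:

	CONFIG_CPU_FREQ_STAT=y
	CONFIG_CPU_FREQ_STAT_DETAILS=y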
diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c
index 404360cad25c..6d5dc04c3a37 100644
--- a/drivers/cpufreq/amd_freq_sensitivity.c
+++ b/drivers/cpufreq/amd_freq_sensitivity.c
@@ -48,9 +48,8 @@ static unsigned int amd_powersave_bias_target(struct cpufreq_policy *policy,
 	struct policy_dbs_info *policy_dbs = policy->governor_data;
 	struct dbs_data *od_data = policy_dbs->dbs_data;
 	struct od_dbs_tuners *od_tuners = od_data->tuners;
-	struct od_policy_dbs_info *od_info = to_dbs_info(policy_dbs);
 
-	if (!od_info->freq_table)
+	if (!policy->freq_table)
 		return freq_next;
 
 	rdmsr_on_cpu(policy->cpu, MSR_AMD64_FREQ_SENSITIVITY_ACTUAL,
@@ -92,10 +91,9 @@ static unsigned int amd_powersave_bias_target(struct cpufreq_policy *policy,
 	else {
 		unsigned int index;
 
-		cpufreq_frequency_table_target(policy,
-				od_info->freq_table, policy->cur - 1,
-				CPUFREQ_RELATION_H, &index);
-		freq_next = od_info->freq_table[index].frequency;
+		index = cpufreq_frequency_table_target(policy,
+					policy->cur - 1, CPUFREQ_RELATION_H);
+		freq_next = policy->freq_table[index].frequency;
 	}
 
 	data->freq_prev = freq_next;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 9009295f5134..9ae58a18ccb9 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -74,19 +74,12 @@ static inline bool has_target(void)
74} 74}
75 75
76/* internal prototypes */ 76/* internal prototypes */
77static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
78static unsigned int __cpufreq_get(struct cpufreq_policy *policy); 77static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
78static int cpufreq_init_governor(struct cpufreq_policy *policy);
79static void cpufreq_exit_governor(struct cpufreq_policy *policy);
79static int cpufreq_start_governor(struct cpufreq_policy *policy); 80static int cpufreq_start_governor(struct cpufreq_policy *policy);
80 81static void cpufreq_stop_governor(struct cpufreq_policy *policy);
81static inline void cpufreq_exit_governor(struct cpufreq_policy *policy) 82static void cpufreq_governor_limits(struct cpufreq_policy *policy);
82{
83 (void)cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
84}
85
86static inline void cpufreq_stop_governor(struct cpufreq_policy *policy)
87{
88 (void)cpufreq_governor(policy, CPUFREQ_GOV_STOP);
89}
90 83
91/** 84/**
92 * Two notifier lists: the "policy" list is involved in the 85 * Two notifier lists: the "policy" list is involved in the
@@ -133,15 +126,6 @@ struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
133} 126}
134EXPORT_SYMBOL_GPL(get_governor_parent_kobj); 127EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
135 128
136struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
137{
138 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
139
140 return policy && !policy_is_inactive(policy) ?
141 policy->freq_table : NULL;
142}
143EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
144
145static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) 129static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
146{ 130{
147 u64 idle_time; 131 u64 idle_time;
@@ -354,6 +338,7 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
354 pr_debug("FREQ: %lu - CPU: %lu\n", 338 pr_debug("FREQ: %lu - CPU: %lu\n",
355 (unsigned long)freqs->new, (unsigned long)freqs->cpu); 339 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
356 trace_cpu_frequency(freqs->new, freqs->cpu); 340 trace_cpu_frequency(freqs->new, freqs->cpu);
341 cpufreq_stats_record_transition(policy, freqs->new);
357 srcu_notifier_call_chain(&cpufreq_transition_notifier_list, 342 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
358 CPUFREQ_POSTCHANGE, freqs); 343 CPUFREQ_POSTCHANGE, freqs);
359 if (likely(policy) && likely(policy->cpu == freqs->cpu)) 344 if (likely(policy) && likely(policy->cpu == freqs->cpu))
@@ -1115,6 +1100,7 @@ static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
1115 CPUFREQ_REMOVE_POLICY, policy); 1100 CPUFREQ_REMOVE_POLICY, policy);
1116 1101
1117 down_write(&policy->rwsem); 1102 down_write(&policy->rwsem);
1103 cpufreq_stats_free_table(policy);
1118 cpufreq_remove_dev_symlink(policy); 1104 cpufreq_remove_dev_symlink(policy);
1119 kobj = &policy->kobj; 1105 kobj = &policy->kobj;
1120 cmp = &policy->kobj_unregister; 1106 cmp = &policy->kobj_unregister;
@@ -1265,13 +1251,12 @@ static int cpufreq_online(unsigned int cpu)
1265 } 1251 }
1266 } 1252 }
1267 1253
1268 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1269 CPUFREQ_START, policy);
1270
1271 if (new_policy) { 1254 if (new_policy) {
1272 ret = cpufreq_add_dev_interface(policy); 1255 ret = cpufreq_add_dev_interface(policy);
1273 if (ret) 1256 if (ret)
1274 goto out_exit_policy; 1257 goto out_exit_policy;
1258
1259 cpufreq_stats_create_table(policy);
1275 blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 1260 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1276 CPUFREQ_CREATE_POLICY, policy); 1261 CPUFREQ_CREATE_POLICY, policy);
1277 1262
@@ -1280,6 +1265,9 @@ static int cpufreq_online(unsigned int cpu)
1280 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1265 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1281 } 1266 }
1282 1267
1268 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1269 CPUFREQ_START, policy);
1270
1283 ret = cpufreq_init_policy(policy); 1271 ret = cpufreq_init_policy(policy);
1284 if (ret) { 1272 if (ret) {
1285 pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n", 1273 pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
@@ -1864,14 +1852,17 @@ static int __target_intermediate(struct cpufreq_policy *policy,
1864 return ret; 1852 return ret;
1865} 1853}
1866 1854
1867static int __target_index(struct cpufreq_policy *policy, 1855static int __target_index(struct cpufreq_policy *policy, int index)
1868 struct cpufreq_frequency_table *freq_table, int index)
1869{ 1856{
1870 struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0}; 1857 struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
1871 unsigned int intermediate_freq = 0; 1858 unsigned int intermediate_freq = 0;
1859 unsigned int newfreq = policy->freq_table[index].frequency;
1872 int retval = -EINVAL; 1860 int retval = -EINVAL;
1873 bool notify; 1861 bool notify;
1874 1862
1863 if (newfreq == policy->cur)
1864 return 0;
1865
1875 notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION); 1866 notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
1876 if (notify) { 1867 if (notify) {
1877 /* Handle switching to intermediate frequency */ 1868 /* Handle switching to intermediate frequency */
@@ -1886,7 +1877,7 @@ static int __target_index(struct cpufreq_policy *policy,
1886 freqs.old = freqs.new; 1877 freqs.old = freqs.new;
1887 } 1878 }
1888 1879
1889 freqs.new = freq_table[index].frequency; 1880 freqs.new = newfreq;
1890 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n", 1881 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1891 __func__, policy->cpu, freqs.old, freqs.new); 1882 __func__, policy->cpu, freqs.old, freqs.new);
1892 1883
@@ -1923,17 +1914,13 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
1923 unsigned int relation) 1914 unsigned int relation)
1924{ 1915{
1925 unsigned int old_target_freq = target_freq; 1916 unsigned int old_target_freq = target_freq;
1926 struct cpufreq_frequency_table *freq_table; 1917 int index;
1927 int index, retval;
1928 1918
1929 if (cpufreq_disabled()) 1919 if (cpufreq_disabled())
1930 return -ENODEV; 1920 return -ENODEV;
1931 1921
1932 /* Make sure that target_freq is within supported range */ 1922 /* Make sure that target_freq is within supported range */
1933 if (target_freq > policy->max) 1923 target_freq = clamp_val(target_freq, policy->min, policy->max);
1934 target_freq = policy->max;
1935 if (target_freq < policy->min)
1936 target_freq = policy->min;
1937 1924
1938 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n", 1925 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1939 policy->cpu, target_freq, relation, old_target_freq); 1926 policy->cpu, target_freq, relation, old_target_freq);
@@ -1956,23 +1943,9 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
1956 if (!cpufreq_driver->target_index) 1943 if (!cpufreq_driver->target_index)
1957 return -EINVAL; 1944 return -EINVAL;
1958 1945
1959 freq_table = cpufreq_frequency_get_table(policy->cpu); 1946 index = cpufreq_frequency_table_target(policy, target_freq, relation);
1960 if (unlikely(!freq_table)) {
1961 pr_err("%s: Unable to find freq_table\n", __func__);
1962 return -EINVAL;
1963 }
1964
1965 retval = cpufreq_frequency_table_target(policy, freq_table, target_freq,
1966 relation, &index);
1967 if (unlikely(retval)) {
1968 pr_err("%s: Unable to find matching freq\n", __func__);
1969 return retval;
1970 }
1971
1972 if (freq_table[index].frequency == policy->cur)
1973 return 0;
1974 1947
1975 return __target_index(policy, freq_table, index); 1948 return __target_index(policy, index);
1976} 1949}
1977EXPORT_SYMBOL_GPL(__cpufreq_driver_target); 1950EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1978 1951
@@ -1997,7 +1970,7 @@ __weak struct cpufreq_governor *cpufreq_fallback_governor(void)
1997 return NULL; 1970 return NULL;
1998} 1971}
1999 1972
2000static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event) 1973static int cpufreq_init_governor(struct cpufreq_policy *policy)
2001{ 1974{
2002 int ret; 1975 int ret;
2003 1976
@@ -2025,36 +1998,82 @@ static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
2025 } 1998 }
2026 } 1999 }
2027 2000
2028 if (event == CPUFREQ_GOV_POLICY_INIT) 2001 if (!try_module_get(policy->governor->owner))
2029 if (!try_module_get(policy->governor->owner)) 2002 return -EINVAL;
2030 return -EINVAL;
2031
2032 pr_debug("%s: for CPU %u, event %u\n", __func__, policy->cpu, event);
2033 2003
2034 ret = policy->governor->governor(policy, event); 2004 pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2035 2005
2036 if (event == CPUFREQ_GOV_POLICY_INIT) { 2006 if (policy->governor->init) {
2037 if (ret) 2007 ret = policy->governor->init(policy);
2008 if (ret) {
2038 module_put(policy->governor->owner); 2009 module_put(policy->governor->owner);
2039 else 2010 return ret;
2040 policy->governor->initialized++; 2011 }
2041 } else if (event == CPUFREQ_GOV_POLICY_EXIT) {
2042 policy->governor->initialized--;
2043 module_put(policy->governor->owner);
2044 } 2012 }
2045 2013
2046 return ret; 2014 return 0;
2015}
2016
2017static void cpufreq_exit_governor(struct cpufreq_policy *policy)
2018{
2019 if (cpufreq_suspended || !policy->governor)
2020 return;
2021
2022 pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2023
2024 if (policy->governor->exit)
2025 policy->governor->exit(policy);
2026
2027 module_put(policy->governor->owner);
2047} 2028}
2048 2029
2049static int cpufreq_start_governor(struct cpufreq_policy *policy) 2030static int cpufreq_start_governor(struct cpufreq_policy *policy)
2050{ 2031{
2051 int ret; 2032 int ret;
2052 2033
2034 if (cpufreq_suspended)
2035 return 0;
2036
2037 if (!policy->governor)
2038 return -EINVAL;
2039
2040 pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2041
2053 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) 2042 if (cpufreq_driver->get && !cpufreq_driver->setpolicy)
2054 cpufreq_update_current_freq(policy); 2043 cpufreq_update_current_freq(policy);
2055 2044
2056 ret = cpufreq_governor(policy, CPUFREQ_GOV_START); 2045 if (policy->governor->start) {
2057 return ret ? ret : cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); 2046 ret = policy->governor->start(policy);
2047 if (ret)
2048 return ret;
2049 }
2050
2051 if (policy->governor->limits)
2052 policy->governor->limits(policy);
2053
2054 return 0;
2055}
2056
2057static void cpufreq_stop_governor(struct cpufreq_policy *policy)
2058{
2059 if (cpufreq_suspended || !policy->governor)
2060 return;
2061
2062 pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2063
2064 if (policy->governor->stop)
2065 policy->governor->stop(policy);
2066}
2067
2068static void cpufreq_governor_limits(struct cpufreq_policy *policy)
2069{
2070 if (cpufreq_suspended || !policy->governor)
2071 return;
2072
2073 pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2074
2075 if (policy->governor->limits)
2076 policy->governor->limits(policy);
2058} 2077}
2059 2078
2060int cpufreq_register_governor(struct cpufreq_governor *governor) 2079int cpufreq_register_governor(struct cpufreq_governor *governor)
@@ -2069,7 +2088,6 @@ int cpufreq_register_governor(struct cpufreq_governor *governor)
2069 2088
2070 mutex_lock(&cpufreq_governor_mutex); 2089 mutex_lock(&cpufreq_governor_mutex);
2071 2090
2072 governor->initialized = 0;
2073 err = -EBUSY; 2091 err = -EBUSY;
2074 if (!find_governor(governor->name)) { 2092 if (!find_governor(governor->name)) {
2075 err = 0; 2093 err = 0;
@@ -2195,7 +2213,8 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
2195 2213
2196 if (new_policy->governor == policy->governor) { 2214 if (new_policy->governor == policy->governor) {
2197 pr_debug("cpufreq: governor limits update\n"); 2215 pr_debug("cpufreq: governor limits update\n");
2198 return cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); 2216 cpufreq_governor_limits(policy);
2217 return 0;
2199 } 2218 }
2200 2219
2201 pr_debug("governor switch\n"); 2220 pr_debug("governor switch\n");
@@ -2210,7 +2229,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
2210 2229
2211 /* start new governor */ 2230 /* start new governor */
2212 policy->governor = new_policy->governor; 2231 policy->governor = new_policy->governor;
2213 ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT); 2232 ret = cpufreq_init_governor(policy);
2214 if (!ret) { 2233 if (!ret) {
2215 ret = cpufreq_start_governor(policy); 2234 ret = cpufreq_start_governor(policy);
2216 if (!ret) { 2235 if (!ret) {
@@ -2224,7 +2243,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
2224 pr_debug("starting governor %s failed\n", policy->governor->name); 2243 pr_debug("starting governor %s failed\n", policy->governor->name);
2225 if (old_gov) { 2244 if (old_gov) {
2226 policy->governor = old_gov; 2245 policy->governor = old_gov;
2227 if (cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) 2246 if (cpufreq_init_governor(policy))
2228 policy->governor = NULL; 2247 policy->governor = NULL;
2229 else 2248 else
2230 cpufreq_start_governor(policy); 2249 cpufreq_start_governor(policy);
@@ -2305,26 +2324,25 @@ static struct notifier_block __refdata cpufreq_cpu_notifier = {
2305 *********************************************************************/ 2324 *********************************************************************/
2306static int cpufreq_boost_set_sw(int state) 2325static int cpufreq_boost_set_sw(int state)
2307{ 2326{
2308 struct cpufreq_frequency_table *freq_table;
2309 struct cpufreq_policy *policy; 2327 struct cpufreq_policy *policy;
2310 int ret = -EINVAL; 2328 int ret = -EINVAL;
2311 2329
2312 for_each_active_policy(policy) { 2330 for_each_active_policy(policy) {
2313 freq_table = cpufreq_frequency_get_table(policy->cpu); 2331 if (!policy->freq_table)
2314 if (freq_table) { 2332 continue;
2315 ret = cpufreq_frequency_table_cpuinfo(policy, 2333
2316 freq_table); 2334 ret = cpufreq_frequency_table_cpuinfo(policy,
2317 if (ret) { 2335 policy->freq_table);
2318 pr_err("%s: Policy frequency update failed\n", 2336 if (ret) {
2319 __func__); 2337 pr_err("%s: Policy frequency update failed\n",
2320 break; 2338 __func__);
2321 } 2339 break;
2322
2323 down_write(&policy->rwsem);
2324 policy->user_policy.max = policy->max;
2325 cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2326 up_write(&policy->rwsem);
2327 } 2340 }
2341
2342 down_write(&policy->rwsem);
2343 policy->user_policy.max = policy->max;
2344 cpufreq_governor_limits(policy);
2345 up_write(&policy->rwsem);
2328 } 2346 }
2329 2347
2330 return ret; 2348 return ret;
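
The cpufreq.c hunks above replace the single multiplexed cpufreq_governor(policy, event) call with per-operation helpers (cpufreq_init_governor(), cpufreq_start_governor(), cpufreq_stop_governor(), cpufreq_exit_governor(), cpufreq_governor_limits()) that invoke optional callbacks on the governor. The matching struct cpufreq_governor change lives in include/linux/cpufreq.h, which is counted in the diffstat but not reproduced in this view; roughly, and hedging on exact field order and surrounding members, the callback set becomes:

	struct cpufreq_governor {
		char	name[CPUFREQ_NAME_LEN];
		int	(*init)(struct cpufreq_policy *policy);
		void	(*exit)(struct cpufreq_policy *policy);
		int	(*start)(struct cpufreq_policy *policy);
		void	(*stop)(struct cpufreq_policy *policy);
		void	(*limits)(struct cpufreq_policy *policy);
		ssize_t	(*show_setspeed)(struct cpufreq_policy *policy, char *buf);
		int	(*store_setspeed)(struct cpufreq_policy *policy,
					  unsigned int freq);
		unsigned int		max_transition_latency;
		struct list_head	governor_list;
		struct module		*owner;
	};

Every callback is optional; the core only calls the ones a governor fills in, which is what lets the performance and powersave governors below shrink to a single ->limits() handler.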
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 316df247e00d..f967ec6c5720 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -127,7 +127,6 @@ static struct notifier_block cs_cpufreq_notifier_block = {
127}; 127};
128 128
129/************************** sysfs interface ************************/ 129/************************** sysfs interface ************************/
130static struct dbs_governor cs_dbs_gov;
131 130
132static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set, 131static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
133 const char *buf, size_t count) 132 const char *buf, size_t count)
@@ -255,6 +254,13 @@ static struct attribute *cs_attributes[] = {
255 254
256/************************** sysfs end ************************/ 255/************************** sysfs end ************************/
257 256
257struct cs_governor {
258 struct dbs_governor dbs_gov;
259 unsigned int usage_count;
260};
261
262static struct cs_governor cs_gov;
263
258static struct policy_dbs_info *cs_alloc(void) 264static struct policy_dbs_info *cs_alloc(void)
259{ 265{
260 struct cs_policy_dbs_info *dbs_info; 266 struct cs_policy_dbs_info *dbs_info;
@@ -268,15 +274,13 @@ static void cs_free(struct policy_dbs_info *policy_dbs)
268 kfree(to_dbs_info(policy_dbs)); 274 kfree(to_dbs_info(policy_dbs));
269} 275}
270 276
271static int cs_init(struct dbs_data *dbs_data, bool notify) 277static int cs_init(struct dbs_data *dbs_data)
272{ 278{
273 struct cs_dbs_tuners *tuners; 279 struct cs_dbs_tuners *tuners;
274 280
275 tuners = kzalloc(sizeof(*tuners), GFP_KERNEL); 281 tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
276 if (!tuners) { 282 if (!tuners)
277 pr_err("%s: kzalloc failed\n", __func__);
278 return -ENOMEM; 283 return -ENOMEM;
279 }
280 284
281 tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD; 285 tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
282 tuners->freq_step = DEF_FREQUENCY_STEP; 286 tuners->freq_step = DEF_FREQUENCY_STEP;
@@ -288,16 +292,22 @@ static int cs_init(struct dbs_data *dbs_data, bool notify)
288 dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO * 292 dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
289 jiffies_to_usecs(10); 293 jiffies_to_usecs(10);
290 294
291 if (notify) 295 /*
296 * This function and cs_exit() are only called under gov_dbs_data_mutex
297 * which is global, so the cs_gov.usage_count accesses are guaranteed
298 * to be serialized.
299 */
300 if (!cs_gov.usage_count++)
292 cpufreq_register_notifier(&cs_cpufreq_notifier_block, 301 cpufreq_register_notifier(&cs_cpufreq_notifier_block,
293 CPUFREQ_TRANSITION_NOTIFIER); 302 CPUFREQ_TRANSITION_NOTIFIER);
294 303
295 return 0; 304 return 0;
296} 305}
297 306
298static void cs_exit(struct dbs_data *dbs_data, bool notify) 307static void cs_exit(struct dbs_data *dbs_data)
299{ 308{
300 if (notify) 309 /* Protected by gov_dbs_data_mutex - see the comment in cs_init(). */
310 if (!--cs_gov.usage_count)
301 cpufreq_unregister_notifier(&cs_cpufreq_notifier_block, 311 cpufreq_unregister_notifier(&cs_cpufreq_notifier_block,
302 CPUFREQ_TRANSITION_NOTIFIER); 312 CPUFREQ_TRANSITION_NOTIFIER);
303 313
@@ -312,23 +322,20 @@ static void cs_start(struct cpufreq_policy *policy)
312 dbs_info->requested_freq = policy->cur; 322 dbs_info->requested_freq = policy->cur;
313} 323}
314 324
315static struct dbs_governor cs_dbs_gov = { 325static struct cs_governor cs_gov = {
316 .gov = { 326 .dbs_gov = {
317 .name = "conservative", 327 .gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("conservative"),
318 .governor = cpufreq_governor_dbs, 328 .kobj_type = { .default_attrs = cs_attributes },
319 .max_transition_latency = TRANSITION_LATENCY_LIMIT, 329 .gov_dbs_timer = cs_dbs_timer,
320 .owner = THIS_MODULE, 330 .alloc = cs_alloc,
331 .free = cs_free,
332 .init = cs_init,
333 .exit = cs_exit,
334 .start = cs_start,
321 }, 335 },
322 .kobj_type = { .default_attrs = cs_attributes },
323 .gov_dbs_timer = cs_dbs_timer,
324 .alloc = cs_alloc,
325 .free = cs_free,
326 .init = cs_init,
327 .exit = cs_exit,
328 .start = cs_start,
329}; 336};
330 337
331#define CPU_FREQ_GOV_CONSERVATIVE (&cs_dbs_gov.gov) 338#define CPU_FREQ_GOV_CONSERVATIVE (&cs_gov.dbs_gov.gov)
332 339
333static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val, 340static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
334 void *data) 341 void *data)
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index be498d56dd69..e415349ab31b 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -336,17 +336,6 @@ static inline void gov_clear_update_util(struct cpufreq_policy *policy)
336 synchronize_sched(); 336 synchronize_sched();
337} 337}
338 338
339static void gov_cancel_work(struct cpufreq_policy *policy)
340{
341 struct policy_dbs_info *policy_dbs = policy->governor_data;
342
343 gov_clear_update_util(policy_dbs->policy);
344 irq_work_sync(&policy_dbs->irq_work);
345 cancel_work_sync(&policy_dbs->work);
346 atomic_set(&policy_dbs->work_count, 0);
347 policy_dbs->work_in_progress = false;
348}
349
350static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy, 339static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
351 struct dbs_governor *gov) 340 struct dbs_governor *gov)
352{ 341{
@@ -389,7 +378,7 @@ static void free_policy_dbs_info(struct policy_dbs_info *policy_dbs,
389 gov->free(policy_dbs); 378 gov->free(policy_dbs);
390} 379}
391 380
392static int cpufreq_governor_init(struct cpufreq_policy *policy) 381int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
393{ 382{
394 struct dbs_governor *gov = dbs_governor_of(policy); 383 struct dbs_governor *gov = dbs_governor_of(policy);
395 struct dbs_data *dbs_data; 384 struct dbs_data *dbs_data;
@@ -429,7 +418,7 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy)
429 418
430 gov_attr_set_init(&dbs_data->attr_set, &policy_dbs->list); 419 gov_attr_set_init(&dbs_data->attr_set, &policy_dbs->list);
431 420
432 ret = gov->init(dbs_data, !policy->governor->initialized); 421 ret = gov->init(dbs_data);
433 if (ret) 422 if (ret)
434 goto free_policy_dbs_info; 423 goto free_policy_dbs_info;
435 424
@@ -458,13 +447,13 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy)
458 goto out; 447 goto out;
459 448
460 /* Failure, so roll back. */ 449 /* Failure, so roll back. */
461 pr_err("cpufreq: Governor initialization failed (dbs_data kobject init error %d)\n", ret); 450 pr_err("initialization failed (dbs_data kobject init error %d)\n", ret);
462 451
463 policy->governor_data = NULL; 452 policy->governor_data = NULL;
464 453
465 if (!have_governor_per_policy()) 454 if (!have_governor_per_policy())
466 gov->gdbs_data = NULL; 455 gov->gdbs_data = NULL;
467 gov->exit(dbs_data, !policy->governor->initialized); 456 gov->exit(dbs_data);
468 kfree(dbs_data); 457 kfree(dbs_data);
469 458
470free_policy_dbs_info: 459free_policy_dbs_info:
@@ -474,8 +463,9 @@ out:
474 mutex_unlock(&gov_dbs_data_mutex); 463 mutex_unlock(&gov_dbs_data_mutex);
475 return ret; 464 return ret;
476} 465}
466EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_init);
477 467
478static int cpufreq_governor_exit(struct cpufreq_policy *policy) 468void cpufreq_dbs_governor_exit(struct cpufreq_policy *policy)
479{ 469{
480 struct dbs_governor *gov = dbs_governor_of(policy); 470 struct dbs_governor *gov = dbs_governor_of(policy);
481 struct policy_dbs_info *policy_dbs = policy->governor_data; 471 struct policy_dbs_info *policy_dbs = policy->governor_data;
@@ -493,17 +483,17 @@ static int cpufreq_governor_exit(struct cpufreq_policy *policy)
493 if (!have_governor_per_policy()) 483 if (!have_governor_per_policy())
494 gov->gdbs_data = NULL; 484 gov->gdbs_data = NULL;
495 485
496 gov->exit(dbs_data, policy->governor->initialized == 1); 486 gov->exit(dbs_data);
497 kfree(dbs_data); 487 kfree(dbs_data);
498 } 488 }
499 489
500 free_policy_dbs_info(policy_dbs, gov); 490 free_policy_dbs_info(policy_dbs, gov);
501 491
502 mutex_unlock(&gov_dbs_data_mutex); 492 mutex_unlock(&gov_dbs_data_mutex);
503 return 0;
504} 493}
494EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_exit);
505 495
506static int cpufreq_governor_start(struct cpufreq_policy *policy) 496int cpufreq_dbs_governor_start(struct cpufreq_policy *policy)
507{ 497{
508 struct dbs_governor *gov = dbs_governor_of(policy); 498 struct dbs_governor *gov = dbs_governor_of(policy);
509 struct policy_dbs_info *policy_dbs = policy->governor_data; 499 struct policy_dbs_info *policy_dbs = policy->governor_data;
@@ -539,47 +529,28 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy)
539 gov_set_update_util(policy_dbs, sampling_rate); 529 gov_set_update_util(policy_dbs, sampling_rate);
540 return 0; 530 return 0;
541} 531}
532EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_start);
542 533
543static int cpufreq_governor_stop(struct cpufreq_policy *policy) 534void cpufreq_dbs_governor_stop(struct cpufreq_policy *policy)
544{ 535{
545 gov_cancel_work(policy); 536 struct policy_dbs_info *policy_dbs = policy->governor_data;
546 return 0; 537
538 gov_clear_update_util(policy_dbs->policy);
539 irq_work_sync(&policy_dbs->irq_work);
540 cancel_work_sync(&policy_dbs->work);
541 atomic_set(&policy_dbs->work_count, 0);
542 policy_dbs->work_in_progress = false;
547} 543}
544EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_stop);
548 545
549static int cpufreq_governor_limits(struct cpufreq_policy *policy) 546void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy)
550{ 547{
551 struct policy_dbs_info *policy_dbs = policy->governor_data; 548 struct policy_dbs_info *policy_dbs = policy->governor_data;
552 549
553 mutex_lock(&policy_dbs->timer_mutex); 550 mutex_lock(&policy_dbs->timer_mutex);
554 551 cpufreq_policy_apply_limits(policy);
555 if (policy->max < policy->cur)
556 __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
557 else if (policy->min > policy->cur)
558 __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
559
560 gov_update_sample_delay(policy_dbs, 0); 552 gov_update_sample_delay(policy_dbs, 0);
561 553
562 mutex_unlock(&policy_dbs->timer_mutex); 554 mutex_unlock(&policy_dbs->timer_mutex);
563
564 return 0;
565}
566
567int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event)
568{
569 if (event == CPUFREQ_GOV_POLICY_INIT) {
570 return cpufreq_governor_init(policy);
571 } else if (policy->governor_data) {
572 switch (event) {
573 case CPUFREQ_GOV_POLICY_EXIT:
574 return cpufreq_governor_exit(policy);
575 case CPUFREQ_GOV_START:
576 return cpufreq_governor_start(policy);
577 case CPUFREQ_GOV_STOP:
578 return cpufreq_governor_stop(policy);
579 case CPUFREQ_GOV_LIMITS:
580 return cpufreq_governor_limits(policy);
581 }
582 }
583 return -EINVAL;
584} 555}
585EXPORT_SYMBOL_GPL(cpufreq_governor_dbs); 556EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_limits);
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 34eb214b6d57..ef1037e9c92b 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -138,8 +138,8 @@ struct dbs_governor {
 	unsigned int (*gov_dbs_timer)(struct cpufreq_policy *policy);
 	struct policy_dbs_info *(*alloc)(void);
 	void (*free)(struct policy_dbs_info *policy_dbs);
-	int (*init)(struct dbs_data *dbs_data, bool notify);
-	void (*exit)(struct dbs_data *dbs_data, bool notify);
+	int (*init)(struct dbs_data *dbs_data);
+	void (*exit)(struct dbs_data *dbs_data);
 	void (*start)(struct cpufreq_policy *policy);
 };
 
@@ -148,6 +148,25 @@ static inline struct dbs_governor *dbs_governor_of(struct cpufreq_policy *policy
 	return container_of(policy->governor, struct dbs_governor, gov);
 }
 
+/* Governor callback routines */
+int cpufreq_dbs_governor_init(struct cpufreq_policy *policy);
+void cpufreq_dbs_governor_exit(struct cpufreq_policy *policy);
+int cpufreq_dbs_governor_start(struct cpufreq_policy *policy);
+void cpufreq_dbs_governor_stop(struct cpufreq_policy *policy);
+void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy);
+
+#define CPUFREQ_DBS_GOVERNOR_INITIALIZER(_name_)			\
+	{								\
+		.name = _name_,						\
+		.max_transition_latency	= TRANSITION_LATENCY_LIMIT,	\
+		.owner = THIS_MODULE,					\
+		.init = cpufreq_dbs_governor_init,			\
+		.exit = cpufreq_dbs_governor_exit,			\
+		.start = cpufreq_dbs_governor_start,			\
+		.stop = cpufreq_dbs_governor_stop,			\
+		.limits = cpufreq_dbs_governor_limits,			\
+	}
+
 /* Governor specific operations */
 struct od_ops {
 	unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
@@ -155,7 +174,6 @@ struct od_ops {
 };
 
 unsigned int dbs_update(struct cpufreq_policy *policy);
-int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event);
 void od_register_powersave_bias_handler(unsigned int (*f)
 		(struct cpufreq_policy *, unsigned int, unsigned int),
 		unsigned int powersave_bias);
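
Taken together, the exported cpufreq_dbs_governor_*() routines and the CPUFREQ_DBS_GOVERNOR_INITIALIZER() macro are what the ondemand and conservative conversions in this series plug into. As a template, a hypothetical new dbs-style governor (all foo_* identifiers are placeholders, mirroring the ondemand pattern shown below) would look roughly like:

	static struct dbs_governor foo_dbs_gov = {
		.gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("foodemand"),
		.kobj_type = { .default_attrs = foo_attributes },
		.gov_dbs_timer = foo_dbs_timer,
		.alloc = foo_alloc,
		.free = foo_free,
		.init = foo_init,
		.exit = foo_exit,
		.start = foo_start,
	};

	#define CPU_FREQ_GOV_FOODEMAND	(&foo_dbs_gov.gov)

so the generic callbacks handle the cpufreq core interface while the per-governor hooks stay local to the governor.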
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 300163430516..0c93cd9dee99 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -65,34 +65,32 @@ static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
65{ 65{
66 unsigned int freq_req, freq_reduc, freq_avg; 66 unsigned int freq_req, freq_reduc, freq_avg;
67 unsigned int freq_hi, freq_lo; 67 unsigned int freq_hi, freq_lo;
68 unsigned int index = 0; 68 unsigned int index;
69 unsigned int delay_hi_us; 69 unsigned int delay_hi_us;
70 struct policy_dbs_info *policy_dbs = policy->governor_data; 70 struct policy_dbs_info *policy_dbs = policy->governor_data;
71 struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs); 71 struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
72 struct dbs_data *dbs_data = policy_dbs->dbs_data; 72 struct dbs_data *dbs_data = policy_dbs->dbs_data;
73 struct od_dbs_tuners *od_tuners = dbs_data->tuners; 73 struct od_dbs_tuners *od_tuners = dbs_data->tuners;
74 struct cpufreq_frequency_table *freq_table = policy->freq_table;
74 75
75 if (!dbs_info->freq_table) { 76 if (!freq_table) {
76 dbs_info->freq_lo = 0; 77 dbs_info->freq_lo = 0;
77 dbs_info->freq_lo_delay_us = 0; 78 dbs_info->freq_lo_delay_us = 0;
78 return freq_next; 79 return freq_next;
79 } 80 }
80 81
81 cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next, 82 index = cpufreq_frequency_table_target(policy, freq_next, relation);
82 relation, &index); 83 freq_req = freq_table[index].frequency;
83 freq_req = dbs_info->freq_table[index].frequency;
84 freq_reduc = freq_req * od_tuners->powersave_bias / 1000; 84 freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
85 freq_avg = freq_req - freq_reduc; 85 freq_avg = freq_req - freq_reduc;
86 86
87 /* Find freq bounds for freq_avg in freq_table */ 87 /* Find freq bounds for freq_avg in freq_table */
88 index = 0; 88 index = cpufreq_frequency_table_target(policy, freq_avg,
89 cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg, 89 CPUFREQ_RELATION_H);
90 CPUFREQ_RELATION_H, &index); 90 freq_lo = freq_table[index].frequency;
91 freq_lo = dbs_info->freq_table[index].frequency; 91 index = cpufreq_frequency_table_target(policy, freq_avg,
92 index = 0; 92 CPUFREQ_RELATION_L);
93 cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg, 93 freq_hi = freq_table[index].frequency;
94 CPUFREQ_RELATION_L, &index);
95 freq_hi = dbs_info->freq_table[index].frequency;
96 94
97 /* Find out how long we have to be in hi and lo freqs */ 95 /* Find out how long we have to be in hi and lo freqs */
98 if (freq_hi == freq_lo) { 96 if (freq_hi == freq_lo) {
@@ -113,7 +111,6 @@ static void ondemand_powersave_bias_init(struct cpufreq_policy *policy)
113{ 111{
114 struct od_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data); 112 struct od_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);
115 113
116 dbs_info->freq_table = cpufreq_frequency_get_table(policy->cpu);
117 dbs_info->freq_lo = 0; 114 dbs_info->freq_lo = 0;
118} 115}
119 116
@@ -361,17 +358,15 @@ static void od_free(struct policy_dbs_info *policy_dbs)
361 kfree(to_dbs_info(policy_dbs)); 358 kfree(to_dbs_info(policy_dbs));
362} 359}
363 360
364static int od_init(struct dbs_data *dbs_data, bool notify) 361static int od_init(struct dbs_data *dbs_data)
365{ 362{
366 struct od_dbs_tuners *tuners; 363 struct od_dbs_tuners *tuners;
367 u64 idle_time; 364 u64 idle_time;
368 int cpu; 365 int cpu;
369 366
370 tuners = kzalloc(sizeof(*tuners), GFP_KERNEL); 367 tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
371 if (!tuners) { 368 if (!tuners)
372 pr_err("%s: kzalloc failed\n", __func__);
373 return -ENOMEM; 369 return -ENOMEM;
374 }
375 370
376 cpu = get_cpu(); 371 cpu = get_cpu();
377 idle_time = get_cpu_idle_time_us(cpu, NULL); 372 idle_time = get_cpu_idle_time_us(cpu, NULL);
@@ -402,7 +397,7 @@ static int od_init(struct dbs_data *dbs_data, bool notify)
402 return 0; 397 return 0;
403} 398}
404 399
405static void od_exit(struct dbs_data *dbs_data, bool notify) 400static void od_exit(struct dbs_data *dbs_data)
406{ 401{
407 kfree(dbs_data->tuners); 402 kfree(dbs_data->tuners);
408} 403}
@@ -420,12 +415,7 @@ static struct od_ops od_ops = {
420}; 415};
421 416
422static struct dbs_governor od_dbs_gov = { 417static struct dbs_governor od_dbs_gov = {
423 .gov = { 418 .gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("ondemand"),
424 .name = "ondemand",
425 .governor = cpufreq_governor_dbs,
426 .max_transition_latency = TRANSITION_LATENCY_LIMIT,
427 .owner = THIS_MODULE,
428 },
429 .kobj_type = { .default_attrs = od_attributes }, 419 .kobj_type = { .default_attrs = od_attributes },
430 .gov_dbs_timer = od_dbs_timer, 420 .gov_dbs_timer = od_dbs_timer,
431 .alloc = od_alloc, 421 .alloc = od_alloc,
diff --git a/drivers/cpufreq/cpufreq_ondemand.h b/drivers/cpufreq/cpufreq_ondemand.h
index f0121db3cd9e..640ea4e97106 100644
--- a/drivers/cpufreq/cpufreq_ondemand.h
+++ b/drivers/cpufreq/cpufreq_ondemand.h
@@ -13,7 +13,6 @@
 
 struct od_policy_dbs_info {
 	struct policy_dbs_info policy_dbs;
-	struct cpufreq_frequency_table *freq_table;
 	unsigned int freq_lo;
 	unsigned int freq_lo_delay_us;
 	unsigned int freq_hi_delay_us;
diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c
index af9f4b96f5a8..dafb679adc58 100644
--- a/drivers/cpufreq/cpufreq_performance.c
+++ b/drivers/cpufreq/cpufreq_performance.c
@@ -16,27 +16,16 @@
 #include <linux/init.h>
 #include <linux/module.h>
 
-static int cpufreq_governor_performance(struct cpufreq_policy *policy,
-					unsigned int event)
+static void cpufreq_gov_performance_limits(struct cpufreq_policy *policy)
 {
-	switch (event) {
-	case CPUFREQ_GOV_START:
-	case CPUFREQ_GOV_LIMITS:
-		pr_debug("setting to %u kHz because of event %u\n",
-						policy->max, event);
-		__cpufreq_driver_target(policy, policy->max,
-						CPUFREQ_RELATION_H);
-		break;
-	default:
-		break;
-	}
-	return 0;
+	pr_debug("setting to %u kHz\n", policy->max);
+	__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
 }
 
 static struct cpufreq_governor cpufreq_gov_performance = {
 	.name		= "performance",
-	.governor	= cpufreq_governor_performance,
 	.owner		= THIS_MODULE,
+	.limits		= cpufreq_gov_performance_limits,
 };
 
 static int __init cpufreq_gov_performance_init(void)
diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c
index b8b400232a74..78a651038faf 100644
--- a/drivers/cpufreq/cpufreq_powersave.c
+++ b/drivers/cpufreq/cpufreq_powersave.c
@@ -16,26 +16,15 @@
 #include <linux/init.h>
 #include <linux/module.h>
 
-static int cpufreq_governor_powersave(struct cpufreq_policy *policy,
-					unsigned int event)
+static void cpufreq_gov_powersave_limits(struct cpufreq_policy *policy)
 {
-	switch (event) {
-	case CPUFREQ_GOV_START:
-	case CPUFREQ_GOV_LIMITS:
-		pr_debug("setting to %u kHz because of event %u\n",
-						policy->min, event);
-		__cpufreq_driver_target(policy, policy->min,
-						CPUFREQ_RELATION_L);
-		break;
-	default:
-		break;
-	}
-	return 0;
+	pr_debug("setting to %u kHz\n", policy->min);
+	__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
 }
 
 static struct cpufreq_governor cpufreq_gov_powersave = {
 	.name		= "powersave",
-	.governor	= cpufreq_governor_powersave,
+	.limits		= cpufreq_gov_powersave_limits,
 	.owner		= THIS_MODULE,
 };
 
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 5e370a30a964..06d3abdffd3a 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -15,7 +15,7 @@
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/cputime.h> 16#include <linux/cputime.h>
17 17
18static spinlock_t cpufreq_stats_lock; 18static DEFINE_SPINLOCK(cpufreq_stats_lock);
19 19
20struct cpufreq_stats { 20struct cpufreq_stats {
21 unsigned int total_trans; 21 unsigned int total_trans;
@@ -52,6 +52,9 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
52 ssize_t len = 0; 52 ssize_t len = 0;
53 int i; 53 int i;
54 54
55 if (policy->fast_switch_enabled)
56 return 0;
57
55 cpufreq_stats_update(stats); 58 cpufreq_stats_update(stats);
56 for (i = 0; i < stats->state_num; i++) { 59 for (i = 0; i < stats->state_num; i++) {
57 len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i], 60 len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i],
@@ -68,6 +71,9 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
68 ssize_t len = 0; 71 ssize_t len = 0;
69 int i, j; 72 int i, j;
70 73
74 if (policy->fast_switch_enabled)
75 return 0;
76
71 len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n"); 77 len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n");
72 len += snprintf(buf + len, PAGE_SIZE - len, " : "); 78 len += snprintf(buf + len, PAGE_SIZE - len, " : ");
73 for (i = 0; i < stats->state_num; i++) { 79 for (i = 0; i < stats->state_num; i++) {
@@ -130,7 +136,7 @@ static int freq_table_get_index(struct cpufreq_stats *stats, unsigned int freq)
130 return -1; 136 return -1;
131} 137}
132 138
133static void __cpufreq_stats_free_table(struct cpufreq_policy *policy) 139void cpufreq_stats_free_table(struct cpufreq_policy *policy)
134{ 140{
135 struct cpufreq_stats *stats = policy->stats; 141 struct cpufreq_stats *stats = policy->stats;
136 142
@@ -146,39 +152,25 @@ static void __cpufreq_stats_free_table(struct cpufreq_policy *policy)
146 policy->stats = NULL; 152 policy->stats = NULL;
147} 153}
148 154
149static void cpufreq_stats_free_table(unsigned int cpu) 155void cpufreq_stats_create_table(struct cpufreq_policy *policy)
150{
151 struct cpufreq_policy *policy;
152
153 policy = cpufreq_cpu_get(cpu);
154 if (!policy)
155 return;
156
157 __cpufreq_stats_free_table(policy);
158
159 cpufreq_cpu_put(policy);
160}
161
162static int __cpufreq_stats_create_table(struct cpufreq_policy *policy)
163{ 156{
164 unsigned int i = 0, count = 0, ret = -ENOMEM; 157 unsigned int i = 0, count = 0, ret = -ENOMEM;
165 struct cpufreq_stats *stats; 158 struct cpufreq_stats *stats;
166 unsigned int alloc_size; 159 unsigned int alloc_size;
167 unsigned int cpu = policy->cpu;
168 struct cpufreq_frequency_table *pos, *table; 160 struct cpufreq_frequency_table *pos, *table;
169 161
170 /* We need cpufreq table for creating stats table */ 162 /* We need cpufreq table for creating stats table */
171 table = cpufreq_frequency_get_table(cpu); 163 table = policy->freq_table;
172 if (unlikely(!table)) 164 if (unlikely(!table))
173 return 0; 165 return;
174 166
175 /* stats already initialized */ 167 /* stats already initialized */
176 if (policy->stats) 168 if (policy->stats)
177 return -EEXIST; 169 return;
178 170
179 stats = kzalloc(sizeof(*stats), GFP_KERNEL); 171 stats = kzalloc(sizeof(*stats), GFP_KERNEL);
180 if (!stats) 172 if (!stats)
181 return -ENOMEM; 173 return;
182 174
183 /* Find total allocation size */ 175 /* Find total allocation size */
184 cpufreq_for_each_valid_entry(pos, table) 176 cpufreq_for_each_valid_entry(pos, table)
@@ -215,80 +207,32 @@ static int __cpufreq_stats_create_table(struct cpufreq_policy *policy)
215 policy->stats = stats; 207 policy->stats = stats;
216 ret = sysfs_create_group(&policy->kobj, &stats_attr_group); 208 ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
217 if (!ret) 209 if (!ret)
218 return 0; 210 return;
219 211
220 /* We failed, release resources */ 212 /* We failed, release resources */
221 policy->stats = NULL; 213 policy->stats = NULL;
222 kfree(stats->time_in_state); 214 kfree(stats->time_in_state);
223free_stat: 215free_stat:
224 kfree(stats); 216 kfree(stats);
225
226 return ret;
227}
228
229static void cpufreq_stats_create_table(unsigned int cpu)
230{
231 struct cpufreq_policy *policy;
232
233 /*
234 * "likely(!policy)" because normally cpufreq_stats will be registered
235 * before cpufreq driver
236 */
237 policy = cpufreq_cpu_get(cpu);
238 if (likely(!policy))
239 return;
240
241 __cpufreq_stats_create_table(policy);
242
243 cpufreq_cpu_put(policy);
244} 217}
245 218
246static int cpufreq_stat_notifier_policy(struct notifier_block *nb, 219void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
247 unsigned long val, void *data) 220 unsigned int new_freq)
248{ 221{
249 int ret = 0; 222 struct cpufreq_stats *stats = policy->stats;
250 struct cpufreq_policy *policy = data;
251
252 if (val == CPUFREQ_CREATE_POLICY)
253 ret = __cpufreq_stats_create_table(policy);
254 else if (val == CPUFREQ_REMOVE_POLICY)
255 __cpufreq_stats_free_table(policy);
256
257 return ret;
258}
259
260static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
261 unsigned long val, void *data)
262{
263 struct cpufreq_freqs *freq = data;
264 struct cpufreq_policy *policy = cpufreq_cpu_get(freq->cpu);
265 struct cpufreq_stats *stats;
266 int old_index, new_index; 223 int old_index, new_index;
267 224
268 if (!policy) { 225 if (!stats) {
269 pr_err("%s: No policy found\n", __func__);
270 return 0;
271 }
272
273 if (val != CPUFREQ_POSTCHANGE)
274 goto put_policy;
275
276 if (!policy->stats) {
277 pr_debug("%s: No stats found\n", __func__); 226 pr_debug("%s: No stats found\n", __func__);
278 goto put_policy; 227 return;
279 } 228 }
280 229
281 stats = policy->stats;
282
283 old_index = stats->last_index; 230 old_index = stats->last_index;
284 new_index = freq_table_get_index(stats, freq->new); 231 new_index = freq_table_get_index(stats, new_freq);
285 232
286 /* We can't do stats->time_in_state[-1]= .. */ 233 /* We can't do stats->time_in_state[-1]= .. */
287 if (old_index == -1 || new_index == -1) 234 if (old_index == -1 || new_index == -1 || old_index == new_index)
288 goto put_policy; 235 return;
289
290 if (old_index == new_index)
291 goto put_policy;
292 236
293 cpufreq_stats_update(stats); 237 cpufreq_stats_update(stats);
294 238
@@ -297,61 +241,4 @@ static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
297 stats->trans_table[old_index * stats->max_state + new_index]++; 241 stats->trans_table[old_index * stats->max_state + new_index]++;
298#endif 242#endif
299 stats->total_trans++; 243 stats->total_trans++;
300
301put_policy:
302 cpufreq_cpu_put(policy);
303 return 0;
304} 244}
305
306static struct notifier_block notifier_policy_block = {
307 .notifier_call = cpufreq_stat_notifier_policy
308};
309
310static struct notifier_block notifier_trans_block = {
311 .notifier_call = cpufreq_stat_notifier_trans
312};
313
314static int __init cpufreq_stats_init(void)
315{
316 int ret;
317 unsigned int cpu;
318
319 spin_lock_init(&cpufreq_stats_lock);
320 ret = cpufreq_register_notifier(&notifier_policy_block,
321 CPUFREQ_POLICY_NOTIFIER);
322 if (ret)
323 return ret;
324
325 for_each_online_cpu(cpu)
326 cpufreq_stats_create_table(cpu);
327
328 ret = cpufreq_register_notifier(&notifier_trans_block,
329 CPUFREQ_TRANSITION_NOTIFIER);
330 if (ret) {
331 cpufreq_unregister_notifier(&notifier_policy_block,
332 CPUFREQ_POLICY_NOTIFIER);
333 for_each_online_cpu(cpu)
334 cpufreq_stats_free_table(cpu);
335 return ret;
336 }
337
338 return 0;
339}
340static void __exit cpufreq_stats_exit(void)
341{
342 unsigned int cpu;
343
344 cpufreq_unregister_notifier(&notifier_policy_block,
345 CPUFREQ_POLICY_NOTIFIER);
346 cpufreq_unregister_notifier(&notifier_trans_block,
347 CPUFREQ_TRANSITION_NOTIFIER);
348 for_each_online_cpu(cpu)
349 cpufreq_stats_free_table(cpu);
350}
351
352MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>");
353MODULE_DESCRIPTION("Export cpufreq stats via sysfs");
354MODULE_LICENSE("GPL");
355
356module_init(cpufreq_stats_init);
357module_exit(cpufreq_stats_exit);
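
With the policy and transition notifiers gone, cpufreq_stats is no longer a self-registering module; the core calls cpufreq_stats_create_table(), cpufreq_stats_free_table() and cpufreq_stats_record_transition() directly (see the cpufreq.c hunks earlier in this diff). The declarations presumably land in the cpufreq header with no-op stubs for CONFIG_CPU_FREQ_STAT=n builds; a sketch of that interface, since the include/linux/cpufreq.h hunk is not shown in this view:

	#ifdef CONFIG_CPU_FREQ_STAT
	void cpufreq_stats_create_table(struct cpufreq_policy *policy);
	void cpufreq_stats_free_table(struct cpufreq_policy *policy);
	void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
					     unsigned int new_freq);
	#else
	static inline void cpufreq_stats_create_table(struct cpufreq_policy *policy) { }
	static inline void cpufreq_stats_free_table(struct cpufreq_policy *policy) { }
	static inline void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
							   unsigned int new_freq) { }
	#endif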
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 9f3dec9a3f36..bd897e3e134d 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -65,66 +65,66 @@ static int cpufreq_userspace_policy_init(struct cpufreq_policy *policy)
65 return 0; 65 return 0;
66} 66}
67 67
68static int cpufreq_governor_userspace(struct cpufreq_policy *policy, 68static void cpufreq_userspace_policy_exit(struct cpufreq_policy *policy)
69 unsigned int event) 69{
70 mutex_lock(&userspace_mutex);
71 kfree(policy->governor_data);
72 policy->governor_data = NULL;
73 mutex_unlock(&userspace_mutex);
74}
75
76static int cpufreq_userspace_policy_start(struct cpufreq_policy *policy)
70{ 77{
71 unsigned int *setspeed = policy->governor_data; 78 unsigned int *setspeed = policy->governor_data;
72 unsigned int cpu = policy->cpu;
73 int rc = 0;
74 79
75 if (event == CPUFREQ_GOV_POLICY_INIT) 80 BUG_ON(!policy->cur);
76 return cpufreq_userspace_policy_init(policy); 81 pr_debug("started managing cpu %u\n", policy->cpu);
77 82
78 if (!setspeed) 83 mutex_lock(&userspace_mutex);
79 return -EINVAL; 84 per_cpu(cpu_is_managed, policy->cpu) = 1;
80 85 *setspeed = policy->cur;
81 switch (event) { 86 mutex_unlock(&userspace_mutex);
82 case CPUFREQ_GOV_POLICY_EXIT: 87 return 0;
83 mutex_lock(&userspace_mutex); 88}
84 policy->governor_data = NULL; 89
85 kfree(setspeed); 90static void cpufreq_userspace_policy_stop(struct cpufreq_policy *policy)
86 mutex_unlock(&userspace_mutex); 91{
87 break; 92 unsigned int *setspeed = policy->governor_data;
88 case CPUFREQ_GOV_START: 93
89 BUG_ON(!policy->cur); 94 pr_debug("managing cpu %u stopped\n", policy->cpu);
90 pr_debug("started managing cpu %u\n", cpu); 95
91 96 mutex_lock(&userspace_mutex);
92 mutex_lock(&userspace_mutex); 97 per_cpu(cpu_is_managed, policy->cpu) = 0;
93 per_cpu(cpu_is_managed, cpu) = 1; 98 *setspeed = 0;
94 *setspeed = policy->cur; 99 mutex_unlock(&userspace_mutex);
95 mutex_unlock(&userspace_mutex); 100}
96 break; 101
97 case CPUFREQ_GOV_STOP: 102static void cpufreq_userspace_policy_limits(struct cpufreq_policy *policy)
98 pr_debug("managing cpu %u stopped\n", cpu); 103{
99 104 unsigned int *setspeed = policy->governor_data;
100 mutex_lock(&userspace_mutex); 105
101 per_cpu(cpu_is_managed, cpu) = 0; 106 mutex_lock(&userspace_mutex);
102 *setspeed = 0; 107
103 mutex_unlock(&userspace_mutex); 108 pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n",
104 break; 109 policy->cpu, policy->min, policy->max, policy->cur, *setspeed);
105 case CPUFREQ_GOV_LIMITS: 110
106 mutex_lock(&userspace_mutex); 111 if (policy->max < *setspeed)
107 pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n", 112 __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
108 cpu, policy->min, policy->max, policy->cur, *setspeed); 113 else if (policy->min > *setspeed)
109 114 __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
110 if (policy->max < *setspeed) 115 else
111 __cpufreq_driver_target(policy, policy->max, 116 __cpufreq_driver_target(policy, *setspeed, CPUFREQ_RELATION_L);
112 CPUFREQ_RELATION_H); 117
113 else if (policy->min > *setspeed) 118 mutex_unlock(&userspace_mutex);
114 __cpufreq_driver_target(policy, policy->min,
115 CPUFREQ_RELATION_L);
116 else
117 __cpufreq_driver_target(policy, *setspeed,
118 CPUFREQ_RELATION_L);
119 mutex_unlock(&userspace_mutex);
120 break;
121 }
122 return rc;
123} 119}
124 120
125static struct cpufreq_governor cpufreq_gov_userspace = { 121static struct cpufreq_governor cpufreq_gov_userspace = {
126 .name = "userspace", 122 .name = "userspace",
127 .governor = cpufreq_governor_userspace, 123 .init = cpufreq_userspace_policy_init,
124 .exit = cpufreq_userspace_policy_exit,
125 .start = cpufreq_userspace_policy_start,
126 .stop = cpufreq_userspace_policy_stop,
127 .limits = cpufreq_userspace_policy_limits,
128 .store_setspeed = cpufreq_set, 128 .store_setspeed = cpufreq_set,
129 .show_setspeed = show_speed, 129 .show_setspeed = show_speed,
130 .owner = THIS_MODULE, 130 .owner = THIS_MODULE,
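
The userspace governor gets the same conversion seen throughout this series: the single event-multiplexing ->governor() callback becomes one hook per operation, so a caller can invoke exactly the hook it needs and skip any a governor leaves NULL. A rough sketch of that dispatch from the caller's side, based on the struct cpufreq_governor layout added to include/linux/cpufreq.h below; it is illustrative only and not the real sequencing in drivers/cpufreq/cpufreq.c.

/* Illustrative dispatch over the split governor hooks. */
static int example_governor_start(struct cpufreq_policy *policy)
{
        struct cpufreq_governor *gov = policy->governor;
        int ret;

        if (gov->init) {                        /* was CPUFREQ_GOV_POLICY_INIT */
                ret = gov->init(policy);
                if (ret)
                        return ret;
        }

        if (!gov->start)                        /* hooks are optional */
                return 0;

        ret = gov->start(policy);               /* was CPUFREQ_GOV_START */
        if (ret && gov->exit)
                gov->exit(policy);              /* was CPUFREQ_GOV_POLICY_EXIT */

        return ret;
}
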
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
index 7e336d20c184..b95a872800ec 100644
--- a/drivers/cpufreq/davinci-cpufreq.c
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -38,26 +38,6 @@ struct davinci_cpufreq {
38}; 38};
39static struct davinci_cpufreq cpufreq; 39static struct davinci_cpufreq cpufreq;
40 40
41static int davinci_verify_speed(struct cpufreq_policy *policy)
42{
43 struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
44 struct cpufreq_frequency_table *freq_table = pdata->freq_table;
45 struct clk *armclk = cpufreq.armclk;
46
47 if (freq_table)
48 return cpufreq_frequency_table_verify(policy, freq_table);
49
50 if (policy->cpu)
51 return -EINVAL;
52
53 cpufreq_verify_within_cpu_limits(policy);
54 policy->min = clk_round_rate(armclk, policy->min * 1000) / 1000;
55 policy->max = clk_round_rate(armclk, policy->max * 1000) / 1000;
56 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
57 policy->cpuinfo.max_freq);
58 return 0;
59}
60
61static int davinci_target(struct cpufreq_policy *policy, unsigned int idx) 41static int davinci_target(struct cpufreq_policy *policy, unsigned int idx)
62{ 42{
63 struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data; 43 struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
@@ -121,7 +101,7 @@ static int davinci_cpu_init(struct cpufreq_policy *policy)
121 101
122static struct cpufreq_driver davinci_driver = { 102static struct cpufreq_driver davinci_driver = {
123 .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, 103 .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
124 .verify = davinci_verify_speed, 104 .verify = cpufreq_generic_frequency_table_verify,
125 .target_index = davinci_target, 105 .target_index = davinci_target,
126 .get = cpufreq_generic_get, 106 .get = cpufreq_generic_get,
127 .init = davinci_cpu_init, 107 .init = davinci_cpu_init,
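
davinci can drop its private verify callback because the generic helper now works from policy->freq_table, which the driver's ->init() already publishes when it validates the table. A hedged sketch of that pairing follows; the driver name, table contents, and latency value are placeholders, and only cpufreq_generic_frequency_table_verify() and cpufreq_generic_init() are existing helpers.

/* Placeholder driver showing why the generic verify is enough once
 * ->init() has attached a validated table to the policy. */
static struct cpufreq_frequency_table example_freq_table[] = {
        { .frequency = 300000 },                /* kHz */
        { .frequency = 600000 },
        { .frequency = CPUFREQ_TABLE_END },
};

static int example_cpu_init(struct cpufreq_policy *policy)
{
        /* Validates the table and sets policy->freq_table. */
        return cpufreq_generic_init(policy, example_freq_table, 100000);
}

static struct cpufreq_driver example_driver = {
        .verify         = cpufreq_generic_frequency_table_verify,
        .init           = example_cpu_init,
        /* .target_index, .get, ... */
};
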
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index a8f1daffc9bc..eac8bcbdaad1 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -63,8 +63,6 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
63 else 63 else
64 return 0; 64 return 0;
65} 65}
66EXPORT_SYMBOL_GPL(cpufreq_frequency_table_cpuinfo);
67
68 66
69int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, 67int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
70 struct cpufreq_frequency_table *table) 68 struct cpufreq_frequency_table *table)
@@ -108,20 +106,16 @@ EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify);
108 */ 106 */
109int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy) 107int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy)
110{ 108{
111 struct cpufreq_frequency_table *table = 109 if (!policy->freq_table)
112 cpufreq_frequency_get_table(policy->cpu);
113 if (!table)
114 return -ENODEV; 110 return -ENODEV;
115 111
116 return cpufreq_frequency_table_verify(policy, table); 112 return cpufreq_frequency_table_verify(policy, policy->freq_table);
117} 113}
118EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify); 114EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify);
119 115
120int cpufreq_frequency_table_target(struct cpufreq_policy *policy, 116int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
121 struct cpufreq_frequency_table *table, 117 unsigned int target_freq,
122 unsigned int target_freq, 118 unsigned int relation)
123 unsigned int relation,
124 unsigned int *index)
125{ 119{
126 struct cpufreq_frequency_table optimal = { 120 struct cpufreq_frequency_table optimal = {
127 .driver_data = ~0, 121 .driver_data = ~0,
@@ -132,7 +126,9 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
132 .frequency = 0, 126 .frequency = 0,
133 }; 127 };
134 struct cpufreq_frequency_table *pos; 128 struct cpufreq_frequency_table *pos;
129 struct cpufreq_frequency_table *table = policy->freq_table;
135 unsigned int freq, diff, i = 0; 130 unsigned int freq, diff, i = 0;
131 int index;
136 132
137 pr_debug("request for target %u kHz (relation: %u) for cpu %u\n", 133 pr_debug("request for target %u kHz (relation: %u) for cpu %u\n",
138 target_freq, relation, policy->cpu); 134 target_freq, relation, policy->cpu);
@@ -196,25 +192,26 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
196 } 192 }
197 } 193 }
198 if (optimal.driver_data > i) { 194 if (optimal.driver_data > i) {
199 if (suboptimal.driver_data > i) 195 if (suboptimal.driver_data > i) {
200 return -EINVAL; 196 WARN(1, "Invalid frequency table: %d\n", policy->cpu);
201 *index = suboptimal.driver_data; 197 return 0;
202 } else 198 }
203 *index = optimal.driver_data;
204 199
205 pr_debug("target index is %u, freq is:%u kHz\n", *index, 200 index = suboptimal.driver_data;
206 table[*index].frequency); 201 } else
202 index = optimal.driver_data;
207 203
208 return 0; 204 pr_debug("target index is %u, freq is:%u kHz\n", index,
205 table[index].frequency);
206 return index;
209} 207}
210EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target); 208EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target);
211 209
212int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy, 210int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
213 unsigned int freq) 211 unsigned int freq)
214{ 212{
215 struct cpufreq_frequency_table *pos, *table; 213 struct cpufreq_frequency_table *pos, *table = policy->freq_table;
216 214
217 table = cpufreq_frequency_get_table(policy->cpu);
218 if (unlikely(!table)) { 215 if (unlikely(!table)) {
219 pr_debug("%s: Unable to find frequency table\n", __func__); 216 pr_debug("%s: Unable to find frequency table\n", __func__);
220 return -ENOENT; 217 return -ENOENT;
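
The reworked cpufreq_frequency_table_target() reads the table from policy->freq_table and returns the selected index instead of filling in an output parameter; an unusable table now triggers a WARN and index 0, so callers no longer carry an error path. A small sketch of the new calling convention from a driver-style ->target() function; the function itself is hypothetical.

/* Old convention (removed here):
 *      if (cpufreq_frequency_table_target(policy, table, target_freq,
 *                                         relation, &index))
 *              return -EINVAL;
 * New convention: the index is the return value and cannot fail. */
static int example_target(struct cpufreq_policy *policy,
                          unsigned int target_freq, unsigned int relation)
{
        int index = cpufreq_frequency_table_target(policy, target_freq,
                                                   relation);

        /* Program the hardware to policy->freq_table[index].frequency. */
        pr_debug("picked entry %d (%u kHz)\n", index,
                 policy->freq_table[index].frequency);
        return 0;
}
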
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 54c45368e3f1..b29c5c20c3a1 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -760,9 +760,8 @@ void powernv_cpufreq_work_fn(struct work_struct *work)
760 struct cpufreq_policy policy; 760 struct cpufreq_policy policy;
761 761
762 cpufreq_get_policy(&policy, cpu); 762 cpufreq_get_policy(&policy, cpu);
763 cpufreq_frequency_table_target(&policy, policy.freq_table, 763 index = cpufreq_frequency_table_target(&policy, policy.cur,
764 policy.cur, 764 CPUFREQ_RELATION_C);
765 CPUFREQ_RELATION_C, &index);
766 powernv_cpufreq_target_index(&policy, index); 765 powernv_cpufreq_target_index(&policy, index);
767 cpumask_andnot(&mask, &mask, policy.cpus); 766 cpumask_andnot(&mask, &mask, policy.cpus);
768 } 767 }
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
index 7c4cd5c634f2..dc112481a408 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
@@ -94,7 +94,7 @@ static int pmi_notifier(struct notifier_block *nb,
94 unsigned long event, void *data) 94 unsigned long event, void *data)
95{ 95{
96 struct cpufreq_policy *policy = data; 96 struct cpufreq_policy *policy = data;
97 struct cpufreq_frequency_table *cbe_freqs; 97 struct cpufreq_frequency_table *cbe_freqs = policy->freq_table;
98 u8 node; 98 u8 node;
99 99
100 /* Should this really be called for CPUFREQ_ADJUST and CPUFREQ_NOTIFY 100 /* Should this really be called for CPUFREQ_ADJUST and CPUFREQ_NOTIFY
@@ -103,7 +103,6 @@ static int pmi_notifier(struct notifier_block *nb,
103 if (event == CPUFREQ_START) 103 if (event == CPUFREQ_START)
104 return 0; 104 return 0;
105 105
106 cbe_freqs = cpufreq_frequency_get_table(policy->cpu);
107 node = cbe_cpu_to_node(policy->cpu); 106 node = cbe_cpu_to_node(policy->cpu);
108 107
109 pr_debug("got notified, event=%lu, node=%u\n", event, node); 108 pr_debug("got notified, event=%lu, node=%u\n", event, node);
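
A CPUFREQ_POLICY_NOTIFIER already receives the policy as its data argument, so the PMI notifier can take the table straight from policy->freq_table rather than looking it up by CPU; the cpufreq_frequency_get_table() lookup disappears from the public header further down. A stand-alone sketch of that notifier shape; apart from the policy->freq_table access, the callback below is placeholder code.

/* Placeholder policy notifier; only the freq_table access mirrors the
 * change above. */
static int example_policy_notifier(struct notifier_block *nb,
                                   unsigned long event, void *data)
{
        struct cpufreq_policy *policy = data;
        struct cpufreq_frequency_table *pos;

        if (!policy->freq_table)
                return 0;

        cpufreq_for_each_entry(pos, policy->freq_table)
                pr_debug("cpu%u: %u kHz\n", policy->cpu, pos->frequency);

        return 0;
}
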
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index ae8eaed77b70..7b596fa38ad2 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -293,12 +293,8 @@ static int s3c_cpufreq_target(struct cpufreq_policy *policy,
293 __func__, policy, target_freq, relation); 293 __func__, policy, target_freq, relation);
294 294
295 if (ftab) { 295 if (ftab) {
296 if (cpufreq_frequency_table_target(policy, ftab, 296 index = cpufreq_frequency_table_target(policy, target_freq,
297 target_freq, relation, 297 relation);
298 &index)) {
299 s3c_freq_dbg("%s: table failed\n", __func__);
300 return -EINVAL;
301 }
302 298
303 s3c_freq_dbg("%s: adjust %d to entry %d (%u)\n", __func__, 299 s3c_freq_dbg("%s: adjust %d to entry %d (%u)\n", __func__,
304 target_freq, index, ftab[index].frequency); 300 target_freq, index, ftab[index].frequency);
@@ -315,7 +311,6 @@ static int s3c_cpufreq_target(struct cpufreq_policy *policy,
315 pll = NULL; 311 pll = NULL;
316 } else { 312 } else {
317 struct cpufreq_policy tmp_policy; 313 struct cpufreq_policy tmp_policy;
318 int ret;
319 314
320 /* we keep the cpu pll table in Hz, to ensure we get an 315 /* we keep the cpu pll table in Hz, to ensure we get an
321 * accurate value for the PLL output. */ 316 * accurate value for the PLL output. */
@@ -323,20 +318,14 @@ static int s3c_cpufreq_target(struct cpufreq_policy *policy,
323 tmp_policy.min = policy->min * 1000; 318 tmp_policy.min = policy->min * 1000;
324 tmp_policy.max = policy->max * 1000; 319 tmp_policy.max = policy->max * 1000;
325 tmp_policy.cpu = policy->cpu; 320 tmp_policy.cpu = policy->cpu;
321 tmp_policy.freq_table = pll_reg;
326 322
327 /* cpufreq_frequency_table_target uses a pointer to 'index' 323 /* cpufreq_frequency_table_target returns the index
328 * which is the number of the table entry, not the value of 324 * of the table entry, not the value of
329 * the table entry's index field. */ 325 * the table entry's index field. */
330 326
331 ret = cpufreq_frequency_table_target(&tmp_policy, pll_reg, 327 index = cpufreq_frequency_table_target(&tmp_policy, target_freq,
332 target_freq, relation, 328 relation);
333 &index);
334
335 if (ret < 0) {
336 pr_err("%s: no PLL available\n", __func__);
337 goto err_notpossible;
338 }
339
340 pll = pll_reg + index; 329 pll = pll_reg + index;
341 330
342 s3c_freq_dbg("%s: target %u => %u\n", 331 s3c_freq_dbg("%s: target %u => %u\n",
@@ -346,10 +335,6 @@ static int s3c_cpufreq_target(struct cpufreq_policy *policy,
346 } 335 }
347 336
348 return s3c_cpufreq_settarget(policy, target_freq, pll); 337 return s3c_cpufreq_settarget(policy, target_freq, pll);
349
350 err_notpossible:
351 pr_err("no compatible settings for %d\n", target_freq);
352 return -EINVAL;
353} 338}
354 339
355struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name) 340struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
@@ -571,11 +556,7 @@ static int s3c_cpufreq_build_freq(void)
571{ 556{
572 int size, ret; 557 int size, ret;
573 558
574 if (!cpu_cur.info->calc_freqtable)
575 return -EINVAL;
576
577 kfree(ftab); 559 kfree(ftab);
578 ftab = NULL;
579 560
580 size = cpu_cur.info->calc_freqtable(&cpu_cur, NULL, 0); 561 size = cpu_cur.info->calc_freqtable(&cpu_cur, NULL, 0);
581 size++; 562 size++;
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index 06d85917b6d5..4f4e9df9b7fc 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -246,12 +246,8 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
246 new_freq = s5pv210_freq_table[index].frequency; 246 new_freq = s5pv210_freq_table[index].frequency;
247 247
248 /* Finding current running level index */ 248 /* Finding current running level index */
249 if (cpufreq_frequency_table_target(policy, s5pv210_freq_table, 249 priv_index = cpufreq_frequency_table_target(policy, old_freq,
250 old_freq, CPUFREQ_RELATION_H, 250 CPUFREQ_RELATION_H);
251 &priv_index)) {
252 ret = -EINVAL;
253 goto exit;
254 }
255 251
256 arm_volt = dvs_conf[index].arm_volt; 252 arm_volt = dvs_conf[index].arm_volt;
257 int_volt = dvs_conf[index].int_volt; 253 int_volt = dvs_conf[index].int_volt;
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 5b4b47ed948b..3788ed74c9ab 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -787,22 +787,34 @@ __cpufreq_cooling_register(struct device_node *np,
787 const struct cpumask *clip_cpus, u32 capacitance, 787 const struct cpumask *clip_cpus, u32 capacitance,
788 get_static_t plat_static_func) 788 get_static_t plat_static_func)
789{ 789{
790 struct cpufreq_policy *policy;
790 struct thermal_cooling_device *cool_dev; 791 struct thermal_cooling_device *cool_dev;
791 struct cpufreq_cooling_device *cpufreq_dev; 792 struct cpufreq_cooling_device *cpufreq_dev;
792 char dev_name[THERMAL_NAME_LENGTH]; 793 char dev_name[THERMAL_NAME_LENGTH];
793 struct cpufreq_frequency_table *pos, *table; 794 struct cpufreq_frequency_table *pos, *table;
795 struct cpumask temp_mask;
794 unsigned int freq, i, num_cpus; 796 unsigned int freq, i, num_cpus;
795 int ret; 797 int ret;
796 798
797 table = cpufreq_frequency_get_table(cpumask_first(clip_cpus)); 799 cpumask_and(&temp_mask, clip_cpus, cpu_online_mask);
800 policy = cpufreq_cpu_get(cpumask_first(&temp_mask));
801 if (!policy) {
802 pr_debug("%s: CPUFreq policy not found\n", __func__);
803 return ERR_PTR(-EPROBE_DEFER);
804 }
805
806 table = policy->freq_table;
798 if (!table) { 807 if (!table) {
799 pr_debug("%s: CPUFreq table not found\n", __func__); 808 pr_debug("%s: CPUFreq table not found\n", __func__);
800 return ERR_PTR(-EPROBE_DEFER); 809 cool_dev = ERR_PTR(-ENODEV);
810 goto put_policy;
801 } 811 }
802 812
803 cpufreq_dev = kzalloc(sizeof(*cpufreq_dev), GFP_KERNEL); 813 cpufreq_dev = kzalloc(sizeof(*cpufreq_dev), GFP_KERNEL);
804 if (!cpufreq_dev) 814 if (!cpufreq_dev) {
805 return ERR_PTR(-ENOMEM); 815 cool_dev = ERR_PTR(-ENOMEM);
816 goto put_policy;
817 }
806 818
807 num_cpus = cpumask_weight(clip_cpus); 819 num_cpus = cpumask_weight(clip_cpus);
808 cpufreq_dev->time_in_idle = kcalloc(num_cpus, 820 cpufreq_dev->time_in_idle = kcalloc(num_cpus,
@@ -892,7 +904,7 @@ __cpufreq_cooling_register(struct device_node *np,
892 CPUFREQ_POLICY_NOTIFIER); 904 CPUFREQ_POLICY_NOTIFIER);
893 mutex_unlock(&cooling_cpufreq_lock); 905 mutex_unlock(&cooling_cpufreq_lock);
894 906
895 return cool_dev; 907 goto put_policy;
896 908
897remove_idr: 909remove_idr:
898 release_idr(&cpufreq_idr, cpufreq_dev->id); 910 release_idr(&cpufreq_idr, cpufreq_dev->id);
@@ -906,6 +918,8 @@ free_time_in_idle:
906 kfree(cpufreq_dev->time_in_idle); 918 kfree(cpufreq_dev->time_in_idle);
907free_cdev: 919free_cdev:
908 kfree(cpufreq_dev); 920 kfree(cpufreq_dev);
921put_policy:
922 cpufreq_cpu_put(policy);
909 923
910 return cool_dev; 924 return cool_dev;
911} 925}
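
The cooling device now pins the policy with cpufreq_cpu_get() and releases it on every exit path through the shared put_policy label, distinguishing "no policy yet" (-EPROBE_DEFER) from "policy without a frequency table" (-ENODEV). A minimal, isolated sketch of that get/use/put pattern; the wrapper function is hypothetical.

/* Hypothetical helper showing the reference-counted policy lookup. */
#include <linux/cpufreq.h>
#include <linux/err.h>

static struct cpufreq_frequency_table *example_get_table(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        struct cpufreq_frequency_table *table;

        if (!policy)                            /* cpufreq not ready yet */
                return ERR_PTR(-EPROBE_DEFER);

        table = policy->freq_table;
        if (!table)                             /* driver has no table */
                table = ERR_PTR(-ENODEV);

        cpufreq_cpu_put(policy);                /* drop the reference */
        return table;
}
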
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 4e81e08db752..c378776628b4 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -185,6 +185,18 @@ static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
185static inline void disable_cpufreq(void) { } 185static inline void disable_cpufreq(void) { }
186#endif 186#endif
187 187
188#ifdef CONFIG_CPU_FREQ_STAT
189void cpufreq_stats_create_table(struct cpufreq_policy *policy);
190void cpufreq_stats_free_table(struct cpufreq_policy *policy);
191void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
192 unsigned int new_freq);
193#else
194static inline void cpufreq_stats_create_table(struct cpufreq_policy *policy) { }
195static inline void cpufreq_stats_free_table(struct cpufreq_policy *policy) { }
196static inline void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
197 unsigned int new_freq) { }
198#endif /* CONFIG_CPU_FREQ_STAT */
199
188/********************************************************************* 200/*********************************************************************
189 * CPUFREQ DRIVER INTERFACE * 201 * CPUFREQ DRIVER INTERFACE *
190 *********************************************************************/ 202 *********************************************************************/
@@ -455,18 +467,13 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
455#define MIN_LATENCY_MULTIPLIER (20) 467#define MIN_LATENCY_MULTIPLIER (20)
456#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) 468#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
457 469
458/* Governor Events */
459#define CPUFREQ_GOV_START 1
460#define CPUFREQ_GOV_STOP 2
461#define CPUFREQ_GOV_LIMITS 3
462#define CPUFREQ_GOV_POLICY_INIT 4
463#define CPUFREQ_GOV_POLICY_EXIT 5
464
465struct cpufreq_governor { 470struct cpufreq_governor {
466 char name[CPUFREQ_NAME_LEN]; 471 char name[CPUFREQ_NAME_LEN];
467 int initialized; 472 int (*init)(struct cpufreq_policy *policy);
468 int (*governor) (struct cpufreq_policy *policy, 473 void (*exit)(struct cpufreq_policy *policy);
469 unsigned int event); 474 int (*start)(struct cpufreq_policy *policy);
475 void (*stop)(struct cpufreq_policy *policy);
476 void (*limits)(struct cpufreq_policy *policy);
470 ssize_t (*show_setspeed) (struct cpufreq_policy *policy, 477 ssize_t (*show_setspeed) (struct cpufreq_policy *policy,
471 char *buf); 478 char *buf);
472 int (*store_setspeed) (struct cpufreq_policy *policy, 479 int (*store_setspeed) (struct cpufreq_policy *policy,
@@ -493,6 +500,14 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor);
493struct cpufreq_governor *cpufreq_default_governor(void); 500struct cpufreq_governor *cpufreq_default_governor(void);
494struct cpufreq_governor *cpufreq_fallback_governor(void); 501struct cpufreq_governor *cpufreq_fallback_governor(void);
495 502
503static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy)
504{
505 if (policy->max < policy->cur)
506 __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
507 else if (policy->min > policy->cur)
508 __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
509}
510
496/* Governor attribute set */ 511/* Governor attribute set */
497struct gov_attr_set { 512struct gov_attr_set {
498 struct kobject kobj; 513 struct kobject kobj;
@@ -583,10 +598,8 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
583int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy); 598int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy);
584 599
585int cpufreq_frequency_table_target(struct cpufreq_policy *policy, 600int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
586 struct cpufreq_frequency_table *table,
587 unsigned int target_freq, 601 unsigned int target_freq,
588 unsigned int relation, 602 unsigned int relation);
589 unsigned int *index);
590int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy, 603int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
591 unsigned int freq); 604 unsigned int freq);
592 605
@@ -617,8 +630,6 @@ static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
617 return false; 630 return false;
618} 631}
619#endif 632#endif
620/* the following funtion is for cpufreq core use only */
621struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu);
622 633
623/* the following are really really optional */ 634/* the following are really really optional */
624extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs; 635extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
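
Taken together, the header changes mean a governor fills in only the per-operation callbacks it needs, and ->limits() handlers can lean on the new cpufreq_policy_apply_limits() inline for the min/max clamping that used to be open-coded. A purely illustrative governor skeleton under the new interface; it is not part of this series.

/* Illustrative governor for the callback-based interface. */
#include <linux/cpufreq.h>
#include <linux/module.h>

static int example_gov_start(struct cpufreq_policy *policy)
{
        /* Pin the policy to its maximum frequency while managed. */
        return __cpufreq_driver_target(policy, policy->max,
                                       CPUFREQ_RELATION_H);
}

static void example_gov_limits(struct cpufreq_policy *policy)
{
        /* New helper: clamp policy->cur into [policy->min, policy->max]. */
        cpufreq_policy_apply_limits(policy);
}

static struct cpufreq_governor example_gov = {
        .name   = "example",
        .start  = example_gov_start,
        .limits = example_gov_limits,
        .owner  = THIS_MODULE,
};

static int __init example_gov_register(void)
{
        return cpufreq_register_governor(&example_gov);
}
module_init(example_gov_register);
MODULE_LICENSE("GPL");
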
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 14c4aa25cc45..758efd7f3abe 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -394,7 +394,7 @@ static int sugov_init(struct cpufreq_policy *policy)
394 return ret; 394 return ret;
395} 395}
396 396
397static int sugov_exit(struct cpufreq_policy *policy) 397static void sugov_exit(struct cpufreq_policy *policy)
398{ 398{
399 struct sugov_policy *sg_policy = policy->governor_data; 399 struct sugov_policy *sg_policy = policy->governor_data;
400 struct sugov_tunables *tunables = sg_policy->tunables; 400 struct sugov_tunables *tunables = sg_policy->tunables;
@@ -412,7 +412,6 @@ static int sugov_exit(struct cpufreq_policy *policy)
412 mutex_unlock(&global_tunables_lock); 412 mutex_unlock(&global_tunables_lock);
413 413
414 sugov_policy_free(sg_policy); 414 sugov_policy_free(sg_policy);
415 return 0;
416} 415}
417 416
418static int sugov_start(struct cpufreq_policy *policy) 417static int sugov_start(struct cpufreq_policy *policy)
@@ -444,7 +443,7 @@ static int sugov_start(struct cpufreq_policy *policy)
444 return 0; 443 return 0;
445} 444}
446 445
447static int sugov_stop(struct cpufreq_policy *policy) 446static void sugov_stop(struct cpufreq_policy *policy)
448{ 447{
449 struct sugov_policy *sg_policy = policy->governor_data; 448 struct sugov_policy *sg_policy = policy->governor_data;
450 unsigned int cpu; 449 unsigned int cpu;
@@ -456,53 +455,29 @@ static int sugov_stop(struct cpufreq_policy *policy)
456 455
457 irq_work_sync(&sg_policy->irq_work); 456 irq_work_sync(&sg_policy->irq_work);
458 cancel_work_sync(&sg_policy->work); 457 cancel_work_sync(&sg_policy->work);
459 return 0;
460} 458}
461 459
462static int sugov_limits(struct cpufreq_policy *policy) 460static void sugov_limits(struct cpufreq_policy *policy)
463{ 461{
464 struct sugov_policy *sg_policy = policy->governor_data; 462 struct sugov_policy *sg_policy = policy->governor_data;
465 463
466 if (!policy->fast_switch_enabled) { 464 if (!policy->fast_switch_enabled) {
467 mutex_lock(&sg_policy->work_lock); 465 mutex_lock(&sg_policy->work_lock);
468 466 cpufreq_policy_apply_limits(policy);
469 if (policy->max < policy->cur)
470 __cpufreq_driver_target(policy, policy->max,
471 CPUFREQ_RELATION_H);
472 else if (policy->min > policy->cur)
473 __cpufreq_driver_target(policy, policy->min,
474 CPUFREQ_RELATION_L);
475
476 mutex_unlock(&sg_policy->work_lock); 467 mutex_unlock(&sg_policy->work_lock);
477 } 468 }
478 469
479 sg_policy->need_freq_update = true; 470 sg_policy->need_freq_update = true;
480 return 0;
481}
482
483int sugov_governor(struct cpufreq_policy *policy, unsigned int event)
484{
485 if (event == CPUFREQ_GOV_POLICY_INIT) {
486 return sugov_init(policy);
487 } else if (policy->governor_data) {
488 switch (event) {
489 case CPUFREQ_GOV_POLICY_EXIT:
490 return sugov_exit(policy);
491 case CPUFREQ_GOV_START:
492 return sugov_start(policy);
493 case CPUFREQ_GOV_STOP:
494 return sugov_stop(policy);
495 case CPUFREQ_GOV_LIMITS:
496 return sugov_limits(policy);
497 }
498 }
499 return -EINVAL;
500} 471}
501 472
502static struct cpufreq_governor schedutil_gov = { 473static struct cpufreq_governor schedutil_gov = {
503 .name = "schedutil", 474 .name = "schedutil",
504 .governor = sugov_governor,
505 .owner = THIS_MODULE, 475 .owner = THIS_MODULE,
476 .init = sugov_init,
477 .exit = sugov_exit,
478 .start = sugov_start,
479 .stop = sugov_stop,
480 .limits = sugov_limits,
506}; 481};
507 482
508static int __init sugov_module_init(void) 483static int __init sugov_module_init(void)