path: root/drivers/cpufreq/cpufreq_conservative.c
author	Viresh Kumar <viresh.kumar@linaro.org>	2013-03-27 11:58:58 -0400
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2013-03-31 19:11:34 -0400
commit	4d5dcc4211f9def4281eafb54b8ed483862e8135 (patch)
tree	7f3c725675ce3042d2d2eb86b0b40f93cef73de3 /drivers/cpufreq/cpufreq_conservative.c
parent	7bd353a995d9049262661d85811d6109140582a3 (diff)
cpufreq: governor: Implement per policy instances of governors
Currently there cannot be multiple instances of a single governor type. On a multi-package system with multiple instances of struct policy (one per package), we cannot run multiple instances of the same governor, i.e. we cannot have one ondemand governor instance per package.

The governors directory in sysfs is created at /sys/devices/system/cpu/cpufreq/governor-name/, which again reflects that there can be only one instance of a governor type in the system. This is a bottleneck for multi-cluster systems, where we want different packages to use the same governor type but with different tunables.

This patch uses the infrastructure provided by the earlier patch and implements init/exit routines for the ondemand and conservative governors.

Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Diffstat (limited to 'drivers/cpufreq/cpufreq_conservative.c')
-rw-r--r--	drivers/cpufreq/cpufreq_conservative.c	193
1 file changed, 118 insertions(+), 75 deletions(-)
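As the commit message describes, each cpufreq policy now gets its own dbs_data instance, and the governor's tunables are allocated per policy by an init() callback instead of living in one static struct. Below is a minimal userspace sketch of that ownership model (not part of the patch; the structs are trimmed and the default values simply mirror cs_init() in the diff that follows), showing how two packages can carry independent tunable values at the same time:

/* Simplified userspace model of the per-policy tuner layout (not kernel code). */
#include <stdio.h>
#include <stdlib.h>

struct cs_dbs_tuners {			/* per-policy conservative tunables */
	unsigned int up_threshold;
	unsigned int down_threshold;
	unsigned int sampling_down_factor;
	unsigned int ignore_nice;
	unsigned int freq_step;
};

struct dbs_data {			/* one instance per policy */
	void *tuners;
};

/* mirrors cs_init(): allocate and fill default tunables for one policy */
static int cs_init(struct dbs_data *dbs_data)
{
	struct cs_dbs_tuners *tuners = calloc(1, sizeof(*tuners));

	if (!tuners)
		return -1;		/* stands in for -ENOMEM */
	tuners->up_threshold = 80;
	tuners->down_threshold = 20;
	tuners->sampling_down_factor = 1;
	tuners->ignore_nice = 0;
	tuners->freq_step = 5;
	dbs_data->tuners = tuners;
	return 0;
}

/* mirrors cs_exit(): free the per-policy tunables */
static void cs_exit(struct dbs_data *dbs_data)
{
	free(dbs_data->tuners);
}

int main(void)
{
	struct dbs_data pkg0 = { 0 }, pkg1 = { 0 };

	cs_init(&pkg0);
	cs_init(&pkg1);

	/* the two packages can now diverge without touching each other */
	((struct cs_dbs_tuners *)pkg1.tuners)->freq_step = 10;

	printf("pkg0 freq_step=%u pkg1 freq_step=%u\n",
	       ((struct cs_dbs_tuners *)pkg0.tuners)->freq_step,
	       ((struct cs_dbs_tuners *)pkg1.tuners)->freq_step);

	cs_exit(&pkg0);
	cs_exit(&pkg1);
	return 0;
}

Running the sketch prints different freq_step values for the two per-package instances, which is exactly what a single static cs_tuners structure could not express.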
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 4fd0006b1291..98b49462f4e9 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -20,6 +20,7 @@
 #include <linux/mutex.h>
 #include <linux/notifier.h>
 #include <linux/percpu-defs.h>
+#include <linux/slab.h>
 #include <linux/sysfs.h>
 #include <linux/types.h>
 
@@ -31,17 +32,8 @@
 #define DEF_SAMPLING_DOWN_FACTOR		(1)
 #define MAX_SAMPLING_DOWN_FACTOR		(10)
 
-static struct dbs_data cs_dbs_data;
 static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info);
 
-static struct cs_dbs_tuners cs_tuners = {
-	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
-	.down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
-	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
-	.ignore_nice = 0,
-	.freq_step = 5,
-};
-
 /*
  * Every sampling_rate, we check, if current idle time is less than 20%
  * (default), then we try to increase frequency Every sampling_rate *
@@ -55,24 +47,26 @@ static void cs_check_cpu(int cpu, unsigned int load)
 {
 	struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
 	struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
+	struct dbs_data *dbs_data = policy->governor_data;
+	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 	unsigned int freq_target;
 
 	/*
 	 * break out if we 'cannot' reduce the speed as the user might
 	 * want freq_step to be zero
 	 */
-	if (cs_tuners.freq_step == 0)
+	if (cs_tuners->freq_step == 0)
 		return;
 
 	/* Check for frequency increase */
-	if (load > cs_tuners.up_threshold) {
+	if (load > cs_tuners->up_threshold) {
 		dbs_info->down_skip = 0;
 
 		/* if we are already at full speed then break out early */
 		if (dbs_info->requested_freq == policy->max)
 			return;
 
-		freq_target = (cs_tuners.freq_step * policy->max) / 100;
+		freq_target = (cs_tuners->freq_step * policy->max) / 100;
 
 		/* max freq cannot be less than 100. But who knows.... */
 		if (unlikely(freq_target == 0))
@@ -92,8 +86,8 @@ static void cs_check_cpu(int cpu, unsigned int load)
 	 * support the current CPU usage without triggering the up policy. To be
 	 * safe, we focus 10 points under the threshold.
 	 */
-	if (load < (cs_tuners.down_threshold - 10)) {
-		freq_target = (cs_tuners.freq_step * policy->max) / 100;
+	if (load < (cs_tuners->down_threshold - 10)) {
+		freq_target = (cs_tuners->freq_step * policy->max) / 100;
 
 		dbs_info->requested_freq -= freq_target;
 		if (dbs_info->requested_freq < policy->min)
@@ -119,11 +113,13 @@ static void cs_dbs_timer(struct work_struct *work)
 	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
 	struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info,
 			cpu);
-	int delay = delay_for_sampling_rate(cs_tuners.sampling_rate);
+	struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
+	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+	int delay = delay_for_sampling_rate(cs_tuners->sampling_rate);
 
 	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
-	if (need_load_eval(&core_dbs_info->cdbs, cs_tuners.sampling_rate))
-		dbs_check_cpu(&cs_dbs_data, cpu);
+	if (need_load_eval(&core_dbs_info->cdbs, cs_tuners->sampling_rate))
+		dbs_check_cpu(dbs_data, cpu);
 
 	schedule_delayed_work_on(smp_processor_id(), dw, delay);
 	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
@@ -154,16 +150,12 @@ static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 }
 
 /************************** sysfs interface ************************/
-static ssize_t show_sampling_rate_min(struct kobject *kobj,
-				      struct attribute *attr, char *buf)
-{
-	return sprintf(buf, "%u\n", cs_dbs_data.min_sampling_rate);
-}
+static struct common_dbs_data cs_dbs_cdata;
 
-static ssize_t store_sampling_down_factor(struct kobject *a,
-					  struct attribute *b,
-					  const char *buf, size_t count)
+static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
+		const char *buf, size_t count)
 {
+	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 	unsigned int input;
 	int ret;
 	ret = sscanf(buf, "%u", &input);
@@ -171,13 +163,14 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
 	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
 		return -EINVAL;
 
-	cs_tuners.sampling_down_factor = input;
+	cs_tuners->sampling_down_factor = input;
 	return count;
 }
 
-static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
-				   const char *buf, size_t count)
+static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
+		size_t count)
 {
+	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 	unsigned int input;
 	int ret;
 	ret = sscanf(buf, "%u", &input);
@@ -185,43 +178,46 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
 	if (ret != 1)
 		return -EINVAL;
 
-	cs_tuners.sampling_rate = max(input, cs_dbs_data.min_sampling_rate);
+	cs_tuners->sampling_rate = max(input, dbs_data->min_sampling_rate);
 	return count;
 }
 
-static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
-				  const char *buf, size_t count)
+static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
+		size_t count)
 {
+	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 	unsigned int input;
 	int ret;
 	ret = sscanf(buf, "%u", &input);
 
-	if (ret != 1 || input > 100 || input <= cs_tuners.down_threshold)
+	if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
 		return -EINVAL;
 
-	cs_tuners.up_threshold = input;
+	cs_tuners->up_threshold = input;
 	return count;
 }
 
-static ssize_t store_down_threshold(struct kobject *a, struct attribute *b,
-				    const char *buf, size_t count)
+static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
+		size_t count)
 {
+	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 	unsigned int input;
 	int ret;
 	ret = sscanf(buf, "%u", &input);
 
 	/* cannot be lower than 11 otherwise freq will not fall */
 	if (ret != 1 || input < 11 || input > 100 ||
-			input >= cs_tuners.up_threshold)
+			input >= cs_tuners->up_threshold)
 		return -EINVAL;
 
-	cs_tuners.down_threshold = input;
+	cs_tuners->down_threshold = input;
 	return count;
 }
 
-static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
-				      const char *buf, size_t count)
+static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
+		size_t count)
 {
+	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 	unsigned int input, j;
 	int ret;
 
@@ -232,10 +228,10 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
 	if (input > 1)
 		input = 1;
 
-	if (input == cs_tuners.ignore_nice) /* nothing to do */
+	if (input == cs_tuners->ignore_nice) /* nothing to do */
 		return count;
 
-	cs_tuners.ignore_nice = input;
+	cs_tuners->ignore_nice = input;
 
 	/* we need to re-evaluate prev_cpu_idle */
 	for_each_online_cpu(j) {
@@ -243,16 +239,17 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
 		dbs_info = &per_cpu(cs_cpu_dbs_info, j);
 		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
 					&dbs_info->cdbs.prev_cpu_wall);
-		if (cs_tuners.ignore_nice)
+		if (cs_tuners->ignore_nice)
 			dbs_info->cdbs.prev_cpu_nice =
 				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 	}
 	return count;
 }
 
-static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
-			       const char *buf, size_t count)
+static ssize_t store_freq_step(struct dbs_data *dbs_data, const char *buf,
+		size_t count)
 {
+	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 	unsigned int input;
 	int ret;
 	ret = sscanf(buf, "%u", &input);
@@ -267,43 +264,88 @@ static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
 	 * no need to test here if freq_step is zero as the user might actually
 	 * want this, they would be crazy though :)
 	 */
-	cs_tuners.freq_step = input;
+	cs_tuners->freq_step = input;
 	return count;
 }
 
-show_one(cs, sampling_rate, sampling_rate);
-show_one(cs, sampling_down_factor, sampling_down_factor);
-show_one(cs, up_threshold, up_threshold);
-show_one(cs, down_threshold, down_threshold);
-show_one(cs, ignore_nice_load, ignore_nice);
-show_one(cs, freq_step, freq_step);
-
-define_one_global_rw(sampling_rate);
-define_one_global_rw(sampling_down_factor);
-define_one_global_rw(up_threshold);
-define_one_global_rw(down_threshold);
-define_one_global_rw(ignore_nice_load);
-define_one_global_rw(freq_step);
-define_one_global_ro(sampling_rate_min);
-
-static struct attribute *dbs_attributes[] = {
-	&sampling_rate_min.attr,
-	&sampling_rate.attr,
-	&sampling_down_factor.attr,
-	&up_threshold.attr,
-	&down_threshold.attr,
-	&ignore_nice_load.attr,
-	&freq_step.attr,
+show_store_one(cs, sampling_rate);
+show_store_one(cs, sampling_down_factor);
+show_store_one(cs, up_threshold);
+show_store_one(cs, down_threshold);
+show_store_one(cs, ignore_nice);
+show_store_one(cs, freq_step);
+declare_show_sampling_rate_min(cs);
+
+gov_sys_pol_attr_rw(sampling_rate);
+gov_sys_pol_attr_rw(sampling_down_factor);
+gov_sys_pol_attr_rw(up_threshold);
+gov_sys_pol_attr_rw(down_threshold);
+gov_sys_pol_attr_rw(ignore_nice);
+gov_sys_pol_attr_rw(freq_step);
+gov_sys_pol_attr_ro(sampling_rate_min);
+
+static struct attribute *dbs_attributes_gov_sys[] = {
+	&sampling_rate_min_gov_sys.attr,
+	&sampling_rate_gov_sys.attr,
+	&sampling_down_factor_gov_sys.attr,
+	&up_threshold_gov_sys.attr,
+	&down_threshold_gov_sys.attr,
+	&ignore_nice_gov_sys.attr,
+	&freq_step_gov_sys.attr,
 	NULL
 };
 
-static struct attribute_group cs_attr_group = {
-	.attrs = dbs_attributes,
+static struct attribute_group cs_attr_group_gov_sys = {
+	.attrs = dbs_attributes_gov_sys,
+	.name = "conservative",
+};
+
+static struct attribute *dbs_attributes_gov_pol[] = {
+	&sampling_rate_min_gov_pol.attr,
+	&sampling_rate_gov_pol.attr,
+	&sampling_down_factor_gov_pol.attr,
+	&up_threshold_gov_pol.attr,
+	&down_threshold_gov_pol.attr,
+	&ignore_nice_gov_pol.attr,
+	&freq_step_gov_pol.attr,
+	NULL
+};
+
+static struct attribute_group cs_attr_group_gov_pol = {
+	.attrs = dbs_attributes_gov_pol,
 	.name = "conservative",
 };
 
 /************************** sysfs end ************************/
 
+static int cs_init(struct dbs_data *dbs_data)
+{
+	struct cs_dbs_tuners *tuners;
+
+	tuners = kzalloc(sizeof(struct cs_dbs_tuners), GFP_KERNEL);
+	if (!tuners) {
+		pr_err("%s: kzalloc failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
+	tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
+	tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
+	tuners->ignore_nice = 0;
+	tuners->freq_step = 5;
+
+	dbs_data->tuners = tuners;
+	dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
+		jiffies_to_usecs(10);
+	mutex_init(&dbs_data->mutex);
+	return 0;
+}
+
+static void cs_exit(struct dbs_data *dbs_data)
+{
+	kfree(dbs_data->tuners);
+}
+
 define_get_cpu_dbs_routines(cs_cpu_dbs_info);
 
 static struct notifier_block cs_cpufreq_notifier_block = {
@@ -314,21 +356,23 @@ static struct cs_ops cs_ops = {
 	.notifier_block = &cs_cpufreq_notifier_block,
 };
 
-static struct dbs_data cs_dbs_data = {
+static struct common_dbs_data cs_dbs_cdata = {
 	.governor = GOV_CONSERVATIVE,
-	.attr_group = &cs_attr_group,
-	.tuners = &cs_tuners,
+	.attr_group_gov_sys = &cs_attr_group_gov_sys,
+	.attr_group_gov_pol = &cs_attr_group_gov_pol,
 	.get_cpu_cdbs = get_cpu_cdbs,
 	.get_cpu_dbs_info_s = get_cpu_dbs_info_s,
 	.gov_dbs_timer = cs_dbs_timer,
 	.gov_check_cpu = cs_check_cpu,
 	.gov_ops = &cs_ops,
+	.init = cs_init,
+	.exit = cs_exit,
 };
 
 static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		unsigned int event)
 {
-	return cpufreq_governor_dbs(&cs_dbs_data, policy, event);
+	return cpufreq_governor_dbs(policy, &cs_dbs_cdata, event);
 }
 
 #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
@@ -343,7 +387,6 @@ struct cpufreq_governor cpufreq_gov_conservative = {
 
 static int __init cpufreq_gov_dbs_init(void)
 {
-	mutex_init(&cs_dbs_data.mutex);
 	return cpufreq_register_governor(&cpufreq_gov_conservative);
 }
 