author		Viresh Kumar <viresh.kumar@linaro.org>	2012-10-25 18:47:42 -0400
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2012-11-14 18:33:07 -0500
commit		4471a34f9a1f2da220272e823bdb8e8fa83a7661 (patch)
tree		d63e8c16a4b40da97b558d4b955f8e64157b8900 /drivers/cpufreq/cpufreq_conservative.c
parent		0676f7f2e7d2adec11f40320ca43a8897b8ef906 (diff)
cpufreq: governors: remove redundant code
The ondemand governor was written first, and the conservative governor was later written on top of its code. The conservative governor reused much of ondemand's logic, but as a copy of the code rather than shared routines, which introduced redundancy that is difficult to maintain. This patch moves the common parts of both governors into cpufreq_governor.c to address that. It should not change anything from a functionality point of view.

Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Diffstat (limited to 'drivers/cpufreq/cpufreq_conservative.c')
-rw-r--r--	drivers/cpufreq/cpufreq_conservative.c	548
1 file changed, 159 insertions(+), 389 deletions(-)
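At a glance, the diff below removes the conservative governor's private copies of the sampling timer, load evaluation and per-CPU bookkeeping, and instead has it fill in a descriptor (cs_dbs_data) carrying its tuners and two callbacks, cs_dbs_timer and cs_check_cpu, which the shared code in cpufreq_governor.c drives. The stand-alone C sketch that follows only illustrates that "shared core plus per-governor ops" shape; every name in it (my_dbs_data, common_dbs_check_cpu, and so on) is made up for the example and is not the real cpufreq_governor.h interface.

/*
 * Minimal sketch of the "common dbs core + per-governor ops" structure
 * this patch introduces.  All names here are hypothetical; the real
 * interface lives in drivers/cpufreq/cpufreq_governor.h.
 */
#include <stdio.h>

struct my_dbs_data {
	const char *name;
	unsigned int up_threshold;	/* tuner, cf. DEF_FREQUENCY_UP_THRESHOLD */
	/* per-governor policy hook, cf. gov_check_cpu in the patch */
	void (*gov_check_cpu)(const struct my_dbs_data *dbs, unsigned int load);
};

/* Common code: one load-evaluation path shared by all governors. */
static void common_dbs_check_cpu(const struct my_dbs_data *dbs, unsigned int idle_pct)
{
	unsigned int load = 100 - idle_pct;	/* simplified load estimate */

	dbs->gov_check_cpu(dbs, load);		/* defer the decision to the governor */
}

/* Conservative-style decision: only react once load crosses the threshold. */
static void cs_check_cpu(const struct my_dbs_data *dbs, unsigned int load)
{
	if (load > dbs->up_threshold)
		printf("%s: load %u%% -> step frequency up\n", dbs->name, load);
	else
		printf("%s: load %u%% -> leave frequency alone\n", dbs->name, load);
}

static const struct my_dbs_data cs_example = {
	.name		= "conservative",
	.up_threshold	= 80,
	.gov_check_cpu	= cs_check_cpu,
};

int main(void)
{
	common_dbs_check_cpu(&cs_example, 40);	/* 60% load: below the threshold */
	common_dbs_check_cpu(&cs_example, 5);	/* 95% load: above the threshold */
	return 0;
}

In the real patch the same idea shows up as cs_dbs_data.gov_check_cpu = cs_check_cpu, with the per-CPU load accounting that both governors previously duplicated now living in cpufreq_governor.c.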
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 181abad07266..64ef737e7e72 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -11,83 +11,30 @@
  * published by the Free Software Foundation.
  */

-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
 #include <linux/cpufreq.h>
-#include <linux/cpu.h>
-#include <linux/jiffies.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
 #include <linux/kernel_stat.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
 #include <linux/mutex.h>
-#include <linux/hrtimer.h>
-#include <linux/tick.h>
-#include <linux/ktime.h>
-#include <linux/sched.h>
+#include <linux/notifier.h>
+#include <linux/percpu-defs.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>

-/*
- * dbs is used in this file as a shortform for demandbased switching
- * It helps to keep variable names smaller, simpler
- */
+#include "cpufreq_governor.h"

+/* Conservative governor macors */
 #define DEF_FREQUENCY_UP_THRESHOLD		(80)
 #define DEF_FREQUENCY_DOWN_THRESHOLD		(20)
-
-/*
- * The polling frequency of this governor depends on the capability of
- * the processor. Default polling frequency is 1000 times the transition
- * latency of the processor. The governor will work on any processor with
- * transition latency <= 10mS, using appropriate sampling
- * rate.
- * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
- * this governor will not work.
- * All times here are in uS.
- */
-#define MIN_SAMPLING_RATE_RATIO			(2)
-
-static unsigned int min_sampling_rate;
-
-#define LATENCY_MULTIPLIER			(1000)
-#define MIN_LATENCY_MULTIPLIER			(100)
 #define DEF_SAMPLING_DOWN_FACTOR		(1)
 #define MAX_SAMPLING_DOWN_FACTOR		(10)
-#define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)
-
-static void do_dbs_timer(struct work_struct *work);
-
-struct cpu_dbs_info_s {
-	cputime64_t prev_cpu_idle;
-	cputime64_t prev_cpu_wall;
-	cputime64_t prev_cpu_nice;
-	struct cpufreq_policy *cur_policy;
-	struct delayed_work work;
-	unsigned int down_skip;
-	unsigned int requested_freq;
-	int cpu;
-	unsigned int enable:1;
-	/*
-	 * percpu mutex that serializes governor limit change with
-	 * do_dbs_timer invocation. We do not want do_dbs_timer to run
-	 * when user is changing the governor or limits.
-	 */
-	struct mutex timer_mutex;
-};
-static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info);

-static unsigned int dbs_enable;	/* number of CPUs using this policy */
+static struct dbs_data cs_dbs_data;
+static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info);

-/*
- * dbs_mutex protects dbs_enable in governor start/stop.
- */
-static DEFINE_MUTEX(dbs_mutex);
-
-static struct dbs_tuners {
-	unsigned int sampling_rate;
-	unsigned int sampling_down_factor;
-	unsigned int up_threshold;
-	unsigned int down_threshold;
-	unsigned int ignore_nice;
-	unsigned int freq_step;
-} dbs_tuners_ins = {
+static struct cs_dbs_tuners cs_tuners = {
 	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
 	.down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
 	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
@@ -95,61 +42,121 @@ static struct dbs_tuners {
 	.freq_step = 5,
 };

-/* keep track of frequency transitions */
-static int
-dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
-		     void *data)
+/*
+ * Every sampling_rate, we check, if current idle time is less than 20%
+ * (default), then we try to increase frequency Every sampling_rate *
+ * sampling_down_factor, we check, if current idle time is more than 80%, then
+ * we try to decrease frequency
+ *
+ * Any frequency increase takes it to the maximum frequency. Frequency reduction
+ * happens at minimum steps of 5% (default) of maximum frequency
+ */
+static void cs_check_cpu(int cpu, unsigned int load)
 {
-	struct cpufreq_freqs *freq = data;
-	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cs_cpu_dbs_info,
-							freq->cpu);
+	struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
+	struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
+	unsigned int freq_target;
+
+	/*
+	 * break out if we 'cannot' reduce the speed as the user might
+	 * want freq_step to be zero
+	 */
+	if (cs_tuners.freq_step == 0)
+		return;
+
+	/* Check for frequency increase */
+	if (load > cs_tuners.up_threshold) {
+		dbs_info->down_skip = 0;
+
+		/* if we are already at full speed then break out early */
+		if (dbs_info->requested_freq == policy->max)
+			return;
+
+		freq_target = (cs_tuners.freq_step * policy->max) / 100;
+
+		/* max freq cannot be less than 100. But who knows.... */
+		if (unlikely(freq_target == 0))
+			freq_target = 5;
+
+		dbs_info->requested_freq += freq_target;
+		if (dbs_info->requested_freq > policy->max)
+			dbs_info->requested_freq = policy->max;

+		__cpufreq_driver_target(policy, dbs_info->requested_freq,
+			CPUFREQ_RELATION_H);
+		return;
+	}
+
+	/*
+	 * The optimal frequency is the frequency that is the lowest that can
+	 * support the current CPU usage without triggering the up policy. To be
+	 * safe, we focus 10 points under the threshold.
+	 */
+	if (load < (cs_tuners.down_threshold - 10)) {
+		freq_target = (cs_tuners.freq_step * policy->max) / 100;
+
+		dbs_info->requested_freq -= freq_target;
+		if (dbs_info->requested_freq < policy->min)
+			dbs_info->requested_freq = policy->min;
+
+		/*
+		 * if we cannot reduce the frequency anymore, break out early
+		 */
+		if (policy->cur == policy->min)
+			return;
+
+		__cpufreq_driver_target(policy, dbs_info->requested_freq,
+				CPUFREQ_RELATION_H);
+		return;
+	}
+}
+
+static void cs_dbs_timer(struct work_struct *work)
+{
+	struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
+			struct cs_cpu_dbs_info_s, cdbs.work.work);
+	unsigned int cpu = dbs_info->cdbs.cpu;
+	int delay = delay_for_sampling_rate(cs_tuners.sampling_rate);
+
+	mutex_lock(&dbs_info->cdbs.timer_mutex);
+
+	dbs_check_cpu(&cs_dbs_data, cpu);
+
+	schedule_delayed_work_on(cpu, &dbs_info->cdbs.work, delay);
+	mutex_unlock(&dbs_info->cdbs.timer_mutex);
+}
+
+static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
+		void *data)
+{
+	struct cpufreq_freqs *freq = data;
+	struct cs_cpu_dbs_info_s *dbs_info =
+					&per_cpu(cs_cpu_dbs_info, freq->cpu);
 	struct cpufreq_policy *policy;

-	if (!this_dbs_info->enable)
+	if (!dbs_info->enable)
 		return 0;

-	policy = this_dbs_info->cur_policy;
+	policy = dbs_info->cdbs.cur_policy;

 	/*
-	 * we only care if our internally tracked freq moves outside
-	 * the 'valid' ranges of freqency available to us otherwise
-	 * we do not change it
+	 * we only care if our internally tracked freq moves outside the 'valid'
+	 * ranges of freqency available to us otherwise we do not change it
 	 */
-	if (this_dbs_info->requested_freq > policy->max
-			|| this_dbs_info->requested_freq < policy->min)
-		this_dbs_info->requested_freq = freq->new;
+	if (dbs_info->requested_freq > policy->max
+			|| dbs_info->requested_freq < policy->min)
+		dbs_info->requested_freq = freq->new;

 	return 0;
 }

-static struct notifier_block dbs_cpufreq_notifier_block = {
-	.notifier_call = dbs_cpufreq_notifier
-};
-
 /************************** sysfs interface ************************/
 static ssize_t show_sampling_rate_min(struct kobject *kobj,
 				      struct attribute *attr, char *buf)
 {
-	return sprintf(buf, "%u\n", min_sampling_rate);
+	return sprintf(buf, "%u\n", cs_dbs_data.min_sampling_rate);
 }

-define_one_global_ro(sampling_rate_min);
-
-/* cpufreq_conservative Governor Tunables */
-#define show_one(file_name, object)				\
-static ssize_t show_##file_name					\
-(struct kobject *kobj, struct attribute *attr, char *buf)	\
-{								\
-	return sprintf(buf, "%u\n", dbs_tuners_ins.object);	\
-}
-show_one(sampling_rate, sampling_rate);
-show_one(sampling_down_factor, sampling_down_factor);
-show_one(up_threshold, up_threshold);
-show_one(down_threshold, down_threshold);
-show_one(ignore_nice_load, ignore_nice);
-show_one(freq_step, freq_step);
-
 static ssize_t store_sampling_down_factor(struct kobject *a,
 					  struct attribute *b,
 					  const char *buf, size_t count)
@@ -161,7 +168,7 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
 	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
 		return -EINVAL;

-	dbs_tuners_ins.sampling_down_factor = input;
+	cs_tuners.sampling_down_factor = input;
 	return count;
 }

@@ -175,7 +182,7 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
 	if (ret != 1)
 		return -EINVAL;

-	dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
+	cs_tuners.sampling_rate = max(input, cs_dbs_data.min_sampling_rate);
 	return count;
 }

@@ -186,11 +193,10 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
 	int ret;
 	ret = sscanf(buf, "%u", &input);

-	if (ret != 1 || input > 100 ||
-			input <= dbs_tuners_ins.down_threshold)
+	if (ret != 1 || input > 100 || input <= cs_tuners.down_threshold)
 		return -EINVAL;

-	dbs_tuners_ins.up_threshold = input;
+	cs_tuners.up_threshold = input;
 	return count;
 }

@@ -203,21 +209,19 @@ static ssize_t store_down_threshold(struct kobject *a, struct attribute *b,

 	/* cannot be lower than 11 otherwise freq will not fall */
 	if (ret != 1 || input < 11 || input > 100 ||
-			input >= dbs_tuners_ins.up_threshold)
+			input >= cs_tuners.up_threshold)
 		return -EINVAL;

-	dbs_tuners_ins.down_threshold = input;
+	cs_tuners.down_threshold = input;
 	return count;
 }

 static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
 				      const char *buf, size_t count)
 {
-	unsigned int input;
+	unsigned int input, j;
 	int ret;

-	unsigned int j;
-
 	ret = sscanf(buf, "%u", &input);
 	if (ret != 1)
 		return -EINVAL;
@@ -225,19 +229,20 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
 	if (input > 1)
 		input = 1;

-	if (input == dbs_tuners_ins.ignore_nice) /* nothing to do */
+	if (input == cs_tuners.ignore_nice) /* nothing to do */
 		return count;

-	dbs_tuners_ins.ignore_nice = input;
+	cs_tuners.ignore_nice = input;

 	/* we need to re-evaluate prev_cpu_idle */
 	for_each_online_cpu(j) {
-		struct cpu_dbs_info_s *dbs_info;
+		struct cs_cpu_dbs_info_s *dbs_info;
 		dbs_info = &per_cpu(cs_cpu_dbs_info, j);
-		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
-						&dbs_info->prev_cpu_wall);
-		if (dbs_tuners_ins.ignore_nice)
-			dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
+						&dbs_info->cdbs.prev_cpu_wall);
+		if (cs_tuners.ignore_nice)
+			dbs_info->cdbs.prev_cpu_nice =
+				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 	}
 	return count;
 }
@@ -255,18 +260,28 @@ static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
 	if (input > 100)
 		input = 100;

-	/* no need to test here if freq_step is zero as the user might actually
-	 * want this, they would be crazy though :) */
-	dbs_tuners_ins.freq_step = input;
+	/*
+	 * no need to test here if freq_step is zero as the user might actually
+	 * want this, they would be crazy though :)
+	 */
+	cs_tuners.freq_step = input;
 	return count;
 }

+show_one(cs, sampling_rate, sampling_rate);
+show_one(cs, sampling_down_factor, sampling_down_factor);
+show_one(cs, up_threshold, up_threshold);
+show_one(cs, down_threshold, down_threshold);
+show_one(cs, ignore_nice_load, ignore_nice);
+show_one(cs, freq_step, freq_step);
+
 define_one_global_rw(sampling_rate);
 define_one_global_rw(sampling_down_factor);
 define_one_global_rw(up_threshold);
 define_one_global_rw(down_threshold);
 define_one_global_rw(ignore_nice_load);
 define_one_global_rw(freq_step);
+define_one_global_ro(sampling_rate_min);

 static struct attribute *dbs_attributes[] = {
 	&sampling_rate_min.attr,
@@ -279,283 +294,38 @@ static struct attribute *dbs_attributes[] = {
 	NULL
 };

-static struct attribute_group dbs_attr_group = {
+static struct attribute_group cs_attr_group = {
 	.attrs = dbs_attributes,
 	.name = "conservative",
 };

 /************************** sysfs end ************************/

-static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
-{
-	unsigned int load = 0;
-	unsigned int max_load = 0;
-	unsigned int freq_target;
-
-	struct cpufreq_policy *policy;
-	unsigned int j;
-
-	policy = this_dbs_info->cur_policy;
-
-	/*
-	 * Every sampling_rate, we check, if current idle time is less
-	 * than 20% (default), then we try to increase frequency
-	 * Every sampling_rate*sampling_down_factor, we check, if current
-	 * idle time is more than 80%, then we try to decrease frequency
-	 *
-	 * Any frequency increase takes it to the maximum frequency.
-	 * Frequency reduction happens at minimum steps of
-	 * 5% (default) of maximum frequency
-	 */
-
-	/* Get Absolute Load */
-	for_each_cpu(j, policy->cpus) {
-		struct cpu_dbs_info_s *j_dbs_info;
-		cputime64_t cur_wall_time, cur_idle_time;
-		unsigned int idle_time, wall_time;
-
-		j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
-
-		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
-
-		wall_time = (unsigned int)
-			(cur_wall_time - j_dbs_info->prev_cpu_wall);
-		j_dbs_info->prev_cpu_wall = cur_wall_time;
-
-		idle_time = (unsigned int)
-			(cur_idle_time - j_dbs_info->prev_cpu_idle);
-		j_dbs_info->prev_cpu_idle = cur_idle_time;
-
-		if (dbs_tuners_ins.ignore_nice) {
-			u64 cur_nice;
-			unsigned long cur_nice_jiffies;
-
-			cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
-					j_dbs_info->prev_cpu_nice;
-			/*
-			 * Assumption: nice time between sampling periods will
-			 * be less than 2^32 jiffies for 32 bit sys
-			 */
-			cur_nice_jiffies = (unsigned long)
-					cputime64_to_jiffies64(cur_nice);
-
-			j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
-			idle_time += jiffies_to_usecs(cur_nice_jiffies);
-		}
-
-		if (unlikely(!wall_time || wall_time < idle_time))
-			continue;
-
-		load = 100 * (wall_time - idle_time) / wall_time;
-
-		if (load > max_load)
-			max_load = load;
-	}
+define_get_cpu_dbs_routines(cs_cpu_dbs_info);

-	/*
-	 * break out if we 'cannot' reduce the speed as the user might
-	 * want freq_step to be zero
-	 */
-	if (dbs_tuners_ins.freq_step == 0)
-		return;
-
-	/* Check for frequency increase */
-	if (max_load > dbs_tuners_ins.up_threshold) {
-		this_dbs_info->down_skip = 0;
-
-		/* if we are already at full speed then break out early */
-		if (this_dbs_info->requested_freq == policy->max)
-			return;
-
-		freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;
-
-		/* max freq cannot be less than 100. But who knows.... */
-		if (unlikely(freq_target == 0))
-			freq_target = 5;
-
-		this_dbs_info->requested_freq += freq_target;
-		if (this_dbs_info->requested_freq > policy->max)
-			this_dbs_info->requested_freq = policy->max;
-
-		__cpufreq_driver_target(policy, this_dbs_info->requested_freq,
-			CPUFREQ_RELATION_H);
-		return;
-	}
-
-	/*
-	 * The optimal frequency is the frequency that is the lowest that
-	 * can support the current CPU usage without triggering the up
-	 * policy. To be safe, we focus 10 points under the threshold.
-	 */
-	if (max_load < (dbs_tuners_ins.down_threshold - 10)) {
-		freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;
-
-		this_dbs_info->requested_freq -= freq_target;
-		if (this_dbs_info->requested_freq < policy->min)
-			this_dbs_info->requested_freq = policy->min;
-
-		/*
-		 * if we cannot reduce the frequency anymore, break out early
-		 */
-		if (policy->cur == policy->min)
-			return;
-
-		__cpufreq_driver_target(policy, this_dbs_info->requested_freq,
-			CPUFREQ_RELATION_H);
-		return;
-	}
-}
-
-static void do_dbs_timer(struct work_struct *work)
-{
-	struct cpu_dbs_info_s *dbs_info =
-		container_of(work, struct cpu_dbs_info_s, work.work);
-	unsigned int cpu = dbs_info->cpu;
-
-	/* We want all CPUs to do sampling nearly on same jiffy */
-	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
-
-	delay -= jiffies % delay;
-
-	mutex_lock(&dbs_info->timer_mutex);
-
-	dbs_check_cpu(dbs_info);
-
-	schedule_delayed_work_on(cpu, &dbs_info->work, delay);
-	mutex_unlock(&dbs_info->timer_mutex);
-}
-
-static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
-{
-	/* We want all CPUs to do sampling nearly on same jiffy */
-	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
-	delay -= jiffies % delay;
+static struct notifier_block cs_cpufreq_notifier_block = {
+	.notifier_call = dbs_cpufreq_notifier,
+};

-	dbs_info->enable = 1;
-	INIT_DEFERRABLE_WORK(&dbs_info->work, do_dbs_timer);
-	schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
-}
+static struct cs_ops cs_ops = {
+	.notifier_block = &cs_cpufreq_notifier_block,
+};

-static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
-{
-	dbs_info->enable = 0;
-	cancel_delayed_work_sync(&dbs_info->work);
-}
+static struct dbs_data cs_dbs_data = {
+	.governor = GOV_CONSERVATIVE,
+	.attr_group = &cs_attr_group,
+	.tuners = &cs_tuners,
+	.get_cpu_cdbs = get_cpu_cdbs,
+	.get_cpu_dbs_info_s = get_cpu_dbs_info_s,
+	.gov_dbs_timer = cs_dbs_timer,
+	.gov_check_cpu = cs_check_cpu,
+	.gov_ops = &cs_ops,
+};

-static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
-				unsigned int event)
+static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
+				   unsigned int event)
 {
-	unsigned int cpu = policy->cpu;
-	struct cpu_dbs_info_s *this_dbs_info;
-	unsigned int j;
-	int rc;
-
-	this_dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
-
-	switch (event) {
-	case CPUFREQ_GOV_START:
-		if ((!cpu_online(cpu)) || (!policy->cur))
-			return -EINVAL;
-
-		mutex_lock(&dbs_mutex);
-
-		for_each_cpu(j, policy->cpus) {
-			struct cpu_dbs_info_s *j_dbs_info;
-			j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
-			j_dbs_info->cur_policy = policy;
-
-			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
-						&j_dbs_info->prev_cpu_wall);
-			if (dbs_tuners_ins.ignore_nice)
-				j_dbs_info->prev_cpu_nice =
-					kcpustat_cpu(j).cpustat[CPUTIME_NICE];
-		}
-		this_dbs_info->cpu = cpu;
-		this_dbs_info->down_skip = 0;
-		this_dbs_info->requested_freq = policy->cur;
-
-		mutex_init(&this_dbs_info->timer_mutex);
-		dbs_enable++;
-		/*
-		 * Start the timerschedule work, when this governor
-		 * is used for first time
-		 */
-		if (dbs_enable == 1) {
-			unsigned int latency;
-			/* policy latency is in nS. Convert it to uS first */
-			latency = policy->cpuinfo.transition_latency / 1000;
-			if (latency == 0)
-				latency = 1;
-
-			rc = sysfs_create_group(cpufreq_global_kobject,
-						&dbs_attr_group);
-			if (rc) {
-				mutex_unlock(&dbs_mutex);
-				return rc;
-			}
-
-			/*
-			 * conservative does not implement micro like ondemand
-			 * governor, thus we are bound to jiffes/HZ
-			 */
-			min_sampling_rate =
-				MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
-			/* Bring kernel and HW constraints together */
-			min_sampling_rate = max(min_sampling_rate,
-					MIN_LATENCY_MULTIPLIER * latency);
-			dbs_tuners_ins.sampling_rate =
-				max(min_sampling_rate,
-				    latency * LATENCY_MULTIPLIER);
-
-			cpufreq_register_notifier(
-					&dbs_cpufreq_notifier_block,
-					CPUFREQ_TRANSITION_NOTIFIER);
-		}
-		mutex_unlock(&dbs_mutex);
-
-		dbs_timer_init(this_dbs_info);
-
-		break;
-
-	case CPUFREQ_GOV_STOP:
-		dbs_timer_exit(this_dbs_info);
-
-		mutex_lock(&dbs_mutex);
-		dbs_enable--;
-		mutex_destroy(&this_dbs_info->timer_mutex);
-
-		/*
-		 * Stop the timerschedule work, when this governor
-		 * is used for first time
-		 */
-		if (dbs_enable == 0)
-			cpufreq_unregister_notifier(
-					&dbs_cpufreq_notifier_block,
-					CPUFREQ_TRANSITION_NOTIFIER);
-
-		mutex_unlock(&dbs_mutex);
-		if (!dbs_enable)
-			sysfs_remove_group(cpufreq_global_kobject,
-					   &dbs_attr_group);
-
-		break;
-
-	case CPUFREQ_GOV_LIMITS:
-		mutex_lock(&this_dbs_info->timer_mutex);
-		if (policy->max < this_dbs_info->cur_policy->cur)
-			__cpufreq_driver_target(
-					this_dbs_info->cur_policy,
-					policy->max, CPUFREQ_RELATION_H);
-		else if (policy->min > this_dbs_info->cur_policy->cur)
-			__cpufreq_driver_target(
-					this_dbs_info->cur_policy,
-					policy->min, CPUFREQ_RELATION_L);
-		dbs_check_cpu(this_dbs_info);
-		mutex_unlock(&this_dbs_info->timer_mutex);
-
-		break;
-	}
-	return 0;
+	return cpufreq_governor_dbs(&cs_dbs_data, policy, event);
 }

 #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
@@ -563,13 +333,14 @@ static
 #endif
 struct cpufreq_governor cpufreq_gov_conservative = {
 	.name = "conservative",
-	.governor = cpufreq_governor_dbs,
+	.governor = cs_cpufreq_governor_dbs,
 	.max_transition_latency = TRANSITION_LATENCY_LIMIT,
 	.owner = THIS_MODULE,
 };

 static int __init cpufreq_gov_dbs_init(void)
 {
+	mutex_init(&cs_dbs_data.mutex);
 	return cpufreq_register_governor(&cpufreq_gov_conservative);
 }

@@ -578,7 +349,6 @@ static void __exit cpufreq_gov_dbs_exit(void)
 	cpufreq_unregister_governor(&cpufreq_gov_conservative);
 }

-
 MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
 MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
 		"Low Latency Frequency Transition capable processors "