Diffstat (limited to 'drivers/cpufreq/cpufreq_ondemand.c')
 drivers/cpufreq/cpufreq_ondemand.c | 180 ++++++++++++++++++----------------
 1 file changed, 91 insertions(+), 89 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 8d83a21c6477..c1fc9c62bb51 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -34,13 +34,9 @@
  */
 
 #define DEF_FREQUENCY_UP_THRESHOLD		(80)
-#define MIN_FREQUENCY_UP_THRESHOLD		(0)
+#define MIN_FREQUENCY_UP_THRESHOLD		(11)
 #define MAX_FREQUENCY_UP_THRESHOLD		(100)
 
-#define DEF_FREQUENCY_DOWN_THRESHOLD		(20)
-#define MIN_FREQUENCY_DOWN_THRESHOLD		(0)
-#define MAX_FREQUENCY_DOWN_THRESHOLD		(100)
-
 /*
  * The polling frequency of this governor depends on the capability of
  * the processor. Default polling frequency is 1000 times the transition
@@ -55,9 +51,9 @@ static unsigned int def_sampling_rate;
 #define MIN_SAMPLING_RATE			(def_sampling_rate / 2)
 #define MAX_SAMPLING_RATE			(500 * def_sampling_rate)
 #define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER	(1000)
-#define DEF_SAMPLING_DOWN_FACTOR		(10)
+#define DEF_SAMPLING_DOWN_FACTOR		(1)
+#define MAX_SAMPLING_DOWN_FACTOR		(10)
 #define TRANSITION_LATENCY_LIMIT		(10 * 1000)
-#define sampling_rate_in_HZ(x) (((x * HZ) < (1000 * 1000))?1:((x * HZ) / (1000 * 1000)))
 
 static void do_dbs_timer(void *data);
 
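Note: the deleted sampling_rate_in_HZ() macro was a hand-rolled microseconds-to-jiffies conversion; the rest of the patch switches its call sites to the kernel's usecs_to_jiffies(). A standalone sketch comparing the two — HZ and the usecs_to_jiffies model here are assumptions for the demo, not taken from the patch:

#include <stdio.h>

#define HZ 100 /* assumed tick rate for the example */

/* The macro this patch deletes, reproduced for comparison. */
#define sampling_rate_in_HZ(x) \
	(((x * HZ) < (1000 * 1000)) ? 1 : ((x * HZ) / (1000 * 1000)))

/* Simplified model of the kernel's usecs_to_jiffies() (the real one rounds up). */
static unsigned long my_usecs_to_jiffies(unsigned long us)
{
	return (us * HZ + 999999) / 1000000;
}

int main(void)
{
	unsigned long us = 500000; /* a 500 ms sampling rate, in usecs */
	printf("macro:            %lu jiffies\n", (unsigned long)sampling_rate_in_HZ(us)); /* 50 */
	printf("usecs_to_jiffies: %lu jiffies\n", my_usecs_to_jiffies(us));                /* 50 */
	return 0;
}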
@@ -78,15 +74,23 @@ struct dbs_tuners {
 	unsigned int sampling_rate;
 	unsigned int sampling_down_factor;
 	unsigned int up_threshold;
-	unsigned int down_threshold;
+	unsigned int ignore_nice;
 };
 
 static struct dbs_tuners dbs_tuners_ins = {
 	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
-	.down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
 	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
 };
 
+static inline unsigned int get_cpu_idle_time(unsigned int cpu)
+{
+	return kstat_cpu(cpu).cpustat.idle +
+		kstat_cpu(cpu).cpustat.iowait +
+		( !dbs_tuners_ins.ignore_nice ?
+		  kstat_cpu(cpu).cpustat.nice :
+		  0);
+}
+
 /************************** sysfs interface ************************/
 static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
 {
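Note: get_cpu_idle_time() is the heart of the new ignore_nice tunable — iowait always counts as idle, and nice time counts as idle only while ignore_nice is 0, so a CPU running nothing but nice'd tasks can still be considered idle and scaled down. A minimal userspace sketch of the same accounting; the cpustat struct and the numbers are invented for illustration:

#include <stdio.h>

/* Illustrative stand-in for kstat_cpu(cpu).cpustat: jiffies spent per state. */
struct cpustat { unsigned long user, nice, system, idle, iowait; };

static int ignore_nice; /* mirrors dbs_tuners_ins.ignore_nice */

/* Same idea as get_cpu_idle_time(): iowait always counts as idle,
 * nice time counts as idle only while ignore_nice == 0. */
static unsigned long cpu_idle_time(const struct cpustat *st)
{
	return st->idle + st->iowait + (!ignore_nice ? st->nice : 0);
}

int main(void)
{
	struct cpustat st = { .user = 500, .nice = 200, .system = 100,
			      .idle = 900, .iowait = 50 };

	ignore_nice = 0;
	printf("idle (nice counted): %lu\n", cpu_idle_time(&st)); /* 1150 */
	ignore_nice = 1;
	printf("idle (nice ignored): %lu\n", cpu_idle_time(&st)); /* 950 */
	return 0;
}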
@@ -115,7 +119,7 @@ static ssize_t show_##file_name \
 show_one(sampling_rate, sampling_rate);
 show_one(sampling_down_factor, sampling_down_factor);
 show_one(up_threshold, up_threshold);
-show_one(down_threshold, down_threshold);
+show_one(ignore_nice, ignore_nice);
 
 static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
 		const char *buf, size_t count)
@@ -126,6 +130,9 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
 	if (ret != 1 )
 		return -EINVAL;
 
+	if (input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
+		return -EINVAL;
+
 	down(&dbs_sem);
 	dbs_tuners_ins.sampling_down_factor = input;
 	up(&dbs_sem);
@@ -161,8 +168,7 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 
 	down(&dbs_sem);
 	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
-			input < MIN_FREQUENCY_UP_THRESHOLD ||
-			input <= dbs_tuners_ins.down_threshold) {
+			input < MIN_FREQUENCY_UP_THRESHOLD) {
 		up(&dbs_sem);
 		return -EINVAL;
 	}
@@ -173,22 +179,35 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 	return count;
 }
 
-static ssize_t store_down_threshold(struct cpufreq_policy *unused,
+static ssize_t store_ignore_nice(struct cpufreq_policy *policy,
 		const char *buf, size_t count)
 {
 	unsigned int input;
 	int ret;
+
+	unsigned int j;
+
 	ret = sscanf (buf, "%u", &input);
+	if ( ret != 1 )
+		return -EINVAL;
 
+	if ( input > 1 )
+		input = 1;
+
 	down(&dbs_sem);
-	if (ret != 1 || input > MAX_FREQUENCY_DOWN_THRESHOLD ||
-			input < MIN_FREQUENCY_DOWN_THRESHOLD ||
-			input >= dbs_tuners_ins.up_threshold) {
+	if ( input == dbs_tuners_ins.ignore_nice ) { /* nothing to do */
 		up(&dbs_sem);
-		return -EINVAL;
+		return count;
 	}
+	dbs_tuners_ins.ignore_nice = input;
 
-	dbs_tuners_ins.down_threshold = input;
+	/* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */
+	for_each_online_cpu(j) {
+		struct cpu_dbs_info_s *j_dbs_info;
+		j_dbs_info = &per_cpu(cpu_dbs_info, j);
+		j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
+		j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
+	}
 	up(&dbs_sem);
 
 	return count;
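Note: the re-seeding loop at the end matters — flipping ignore_nice changes what the accumulated idle counters mean, so prev_cpu_idle_up/down are refreshed to avoid one bogus delta on the next sample. For completeness, a hypothetical userspace toggle through the governor's sysfs group; the exact path is an assumption based on the usual per-policy "ondemand" directory created by sysfs_create_group() above:

#include <stdio.h>

/* Hypothetical helper: toggle ignore_nice for cpu0's policy.
 * The sysfs path is assumed, not taken from the patch. */
static int set_ignore_nice(int on)
{
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/ondemand/ignore_nice", "w");
	if (!f)
		return -1;
	fprintf(f, "%d\n", on ? 1 : 0); /* store_ignore_nice() clamps input > 1 to 1 */
	return fclose(f);
}

int main(void)
{
	return set_ignore_nice(1) ? 1 : 0;
}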
@@ -201,7 +220,7 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
 define_one_rw(sampling_rate);
 define_one_rw(sampling_down_factor);
 define_one_rw(up_threshold);
-define_one_rw(down_threshold);
+define_one_rw(ignore_nice);
 
 static struct attribute * dbs_attributes[] = {
 	&sampling_rate_max.attr,
@@ -209,7 +228,7 @@ static struct attribute * dbs_attributes[] = {
 	&sampling_rate.attr,
 	&sampling_down_factor.attr,
 	&up_threshold.attr,
-	&down_threshold.attr,
+	&ignore_nice.attr,
 	NULL
 };
 
@@ -222,9 +241,8 @@ static struct attribute_group dbs_attr_group = {
 
 static void dbs_check_cpu(int cpu)
 {
-	unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
-	unsigned int total_idle_ticks;
-	unsigned int freq_down_step;
+	unsigned int idle_ticks, up_idle_ticks, total_ticks;
+	unsigned int freq_next;
 	unsigned int freq_down_sampling_rate;
 	static int down_skip[NR_CPUS];
 	struct cpu_dbs_info_s *this_dbs_info;
@@ -238,38 +256,25 @@ static void dbs_check_cpu(int cpu)
 
 	policy = this_dbs_info->cur_policy;
 	/*
-	 * The default safe range is 20% to 80%
-	 * Every sampling_rate, we check
-	 * - If current idle time is less than 20%, then we try to
-	 *   increase frequency
-	 * Every sampling_rate*sampling_down_factor, we check
-	 * - If current idle time is more than 80%, then we try to
-	 *   decrease frequency
+	 * Every sampling_rate, we check, if current idle time is less
+	 * than 20% (default), then we try to increase frequency
+	 * Every sampling_rate*sampling_down_factor, we look for the lowest
+	 * frequency which can sustain the load while keeping idle time over
+	 * 30%. If such a frequency exists, we try to decrease to this frequency.
 	 *
 	 * Any frequency increase takes it to the maximum frequency.
 	 * Frequency reduction happens at minimum steps of
-	 * 5% of max_frequency
+	 * 5% (default) of current frequency
 	 */
 
 	/* Check for frequency increase */
-	total_idle_ticks = kstat_cpu(cpu).cpustat.idle +
-		kstat_cpu(cpu).cpustat.iowait;
-	idle_ticks = total_idle_ticks -
-		this_dbs_info->prev_cpu_idle_up;
-	this_dbs_info->prev_cpu_idle_up = total_idle_ticks;
-
-
+	idle_ticks = UINT_MAX;
 	for_each_cpu_mask(j, policy->cpus) {
-		unsigned int tmp_idle_ticks;
+		unsigned int tmp_idle_ticks, total_idle_ticks;
 		struct cpu_dbs_info_s *j_dbs_info;
 
-		if (j == cpu)
-			continue;
-
 		j_dbs_info = &per_cpu(cpu_dbs_info, j);
-		/* Check for frequency increase */
-		total_idle_ticks = kstat_cpu(j).cpustat.idle +
-			kstat_cpu(j).cpustat.iowait;
+		total_idle_ticks = get_cpu_idle_time(j);
 		tmp_idle_ticks = total_idle_ticks -
 			j_dbs_info->prev_cpu_idle_up;
 		j_dbs_info->prev_cpu_idle_up = total_idle_ticks;
@@ -281,13 +286,23 @@ static void dbs_check_cpu(int cpu)
 	/* Scale idle ticks by 100 and compare with up and down ticks */
 	idle_ticks *= 100;
 	up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
-		sampling_rate_in_HZ(dbs_tuners_ins.sampling_rate);
+		usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
 
 	if (idle_ticks < up_idle_ticks) {
+		down_skip[cpu] = 0;
+		for_each_cpu_mask(j, policy->cpus) {
+			struct cpu_dbs_info_s *j_dbs_info;
+
+			j_dbs_info = &per_cpu(cpu_dbs_info, j);
+			j_dbs_info->prev_cpu_idle_down =
+				j_dbs_info->prev_cpu_idle_up;
+		}
+		/* if we are already at full speed then break out early */
+		if (policy->cur == policy->max)
+			return;
+
 		__cpufreq_driver_target(policy, policy->max,
 			CPUFREQ_RELATION_H);
-		down_skip[cpu] = 0;
-		this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
 		return;
 	}
 
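Note: the up-threshold test works in jiffies scaled by 100 — idle_ticks is the smallest per-CPU idle delta over one sampling period, and up_idle_ticks is the idle budget implied by up_threshold. A worked sketch of the arithmetic with invented numbers; HZ and the usecs_to_jiffies model are simplified for the demo:

#include <stdio.h>

#define HZ 100 /* assumed tick rate for the example */

/* Simplified usecs_to_jiffies() for illustration only. */
static unsigned int usecs_to_jiffies(unsigned int us)
{
	return us / (1000000 / HZ);
}

int main(void)
{
	unsigned int sampling_rate = 500000;	/* 500 ms, in usecs (invented) */
	unsigned int up_threshold = 80;		/* DEF_FREQUENCY_UP_THRESHOLD */
	unsigned int idle_jiffies = 5;		/* measured idle over the period */

	/* The governor scales idle by 100 instead of dividing the budget. */
	unsigned int idle_ticks = idle_jiffies * 100;			/* 500 */
	unsigned int up_idle_ticks =
		(100 - up_threshold) * usecs_to_jiffies(sampling_rate);	/* 20 * 50 = 1000 */

	if (idle_ticks < up_idle_ticks)
		printf("idle below 20%%: jump to policy->max\n");
	else
		printf("enough idle time: stay put\n");
	return 0;
}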
@@ -296,23 +311,14 @@ static void dbs_check_cpu(int cpu)
 	if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
 		return;
 
-	total_idle_ticks = kstat_cpu(cpu).cpustat.idle +
-		kstat_cpu(cpu).cpustat.iowait;
-	idle_ticks = total_idle_ticks -
-		this_dbs_info->prev_cpu_idle_down;
-	this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
-
+	idle_ticks = UINT_MAX;
 	for_each_cpu_mask(j, policy->cpus) {
-		unsigned int tmp_idle_ticks;
+		unsigned int tmp_idle_ticks, total_idle_ticks;
 		struct cpu_dbs_info_s *j_dbs_info;
 
-		if (j == cpu)
-			continue;
-
 		j_dbs_info = &per_cpu(cpu_dbs_info, j);
-		/* Check for frequency increase */
-		total_idle_ticks = kstat_cpu(j).cpustat.idle +
-			kstat_cpu(j).cpustat.iowait;
+		/* Check for frequency decrease */
+		total_idle_ticks = j_dbs_info->prev_cpu_idle_up;
 		tmp_idle_ticks = total_idle_ticks -
 			j_dbs_info->prev_cpu_idle_down;
 		j_dbs_info->prev_cpu_idle_down = total_idle_ticks;
@@ -321,38 +327,37 @@ static void dbs_check_cpu(int cpu)
 			idle_ticks = tmp_idle_ticks;
 	}
 
-	/* Scale idle ticks by 100 and compare with up and down ticks */
-	idle_ticks *= 100;
 	down_skip[cpu] = 0;
+	/* if we cannot reduce the frequency anymore, break out early */
+	if (policy->cur == policy->min)
+		return;
 
+	/* Compute how many ticks there are between two measurements */
 	freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
 		dbs_tuners_ins.sampling_down_factor;
-	down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
-		sampling_rate_in_HZ(freq_down_sampling_rate);
+	total_ticks = usecs_to_jiffies(freq_down_sampling_rate);
 
-	if (idle_ticks > down_idle_ticks ) {
-		freq_down_step = (5 * policy->max) / 100;
-
-		/* max freq cannot be less than 100. But who knows.... */
-		if (unlikely(freq_down_step == 0))
-			freq_down_step = 5;
+	/*
+	 * The optimal frequency is the frequency that is the lowest that
+	 * can support the current CPU usage without triggering the up
+	 * policy. To be safe, we focus 10 points under the threshold.
+	 */
+	freq_next = ((total_ticks - idle_ticks) * 100) / total_ticks;
+	freq_next = (freq_next * policy->cur) /
+		(dbs_tuners_ins.up_threshold - 10);
 
-		__cpufreq_driver_target(policy,
-			policy->cur - freq_down_step,
-			CPUFREQ_RELATION_H);
-		return;
-	}
+	if (freq_next <= ((policy->cur * 95) / 100))
+		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
 }
 
 static void do_dbs_timer(void *data)
 {
 	int i;
 	down(&dbs_sem);
-	for (i = 0; i < NR_CPUS; i++)
-		if (cpu_online(i))
-			dbs_check_cpu(i);
+	for_each_online_cpu(i)
+		dbs_check_cpu(i);
 	schedule_delayed_work(&dbs_work,
-		sampling_rate_in_HZ(dbs_tuners_ins.sampling_rate));
+		usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 	up(&dbs_sem);
 }
 
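Note: the rewritten down path replaces fixed 5%-of-max steps with a direct target — estimate load over the longer down-sampling window, then pick the lowest frequency at which that load would still sit 10 points under up_threshold, so the very next sample does not bounce straight back up. A worked example of the two-step arithmetic, with all numbers invented for illustration:

#include <stdio.h>

int main(void)
{
	/* Example window: 50 jiffies sampled, 35 of them idle -> 30% load. */
	unsigned int total_ticks = 50;
	unsigned int idle_ticks = 35;
	unsigned int cur = 2000000;	/* current frequency in kHz (invented) */
	unsigned int up_threshold = 80;	/* DEF_FREQUENCY_UP_THRESHOLD */

	/* Same two steps as dbs_check_cpu(): load in percent, then the
	 * frequency at which that load would sit at (up_threshold - 10)%. */
	unsigned int freq_next = ((total_ticks - idle_ticks) * 100) / total_ticks;
	freq_next = (freq_next * cur) / (up_threshold - 10);

	/* Only move if the target is at least 5% below the current frequency. */
	if (freq_next <= (cur * 95) / 100)
		printf("scale down to ~%u kHz\n", freq_next); /* ~857142 kHz */
	else
		printf("stay at %u kHz\n", cur);
	return 0;
}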
@@ -360,7 +365,7 @@ static inline void dbs_timer_init(void)
 {
 	INIT_WORK(&dbs_work, do_dbs_timer, NULL);
 	schedule_delayed_work(&dbs_work,
-		sampling_rate_in_HZ(dbs_tuners_ins.sampling_rate));
+		usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 	return;
 }
 
@@ -397,12 +402,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		j_dbs_info = &per_cpu(cpu_dbs_info, j);
 		j_dbs_info->cur_policy = policy;
 
-		j_dbs_info->prev_cpu_idle_up =
-			kstat_cpu(j).cpustat.idle +
-			kstat_cpu(j).cpustat.iowait;
-		j_dbs_info->prev_cpu_idle_down =
-			kstat_cpu(j).cpustat.idle +
-			kstat_cpu(j).cpustat.iowait;
+		j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
+		j_dbs_info->prev_cpu_idle_down
+			= j_dbs_info->prev_cpu_idle_up;
 	}
 	this_dbs_info->enable = 1;
 	sysfs_create_group(&policy->kobj, &dbs_attr_group);
@@ -422,6 +424,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			def_sampling_rate = (latency / 1000) *
 				DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
 			dbs_tuners_ins.sampling_rate = def_sampling_rate;
+			dbs_tuners_ins.ignore_nice = 0;
 
 			dbs_timer_init();
 		}
@@ -461,12 +464,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 	return 0;
 }
 
-struct cpufreq_governor cpufreq_gov_dbs = {
+static struct cpufreq_governor cpufreq_gov_dbs = {
 	.name = "ondemand",
 	.governor = cpufreq_governor_dbs,
 	.owner = THIS_MODULE,
 };
-EXPORT_SYMBOL(cpufreq_gov_dbs);
 
 static int __init cpufreq_gov_dbs_init(void)
 {