Diffstat (limited to 'drivers/cpufreq/cpufreq_ondemand.c')
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c  74
1 file changed, 48 insertions, 26 deletions
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 6f45b1658a67..338f428a15b7 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -21,6 +21,7 @@
 #include <linux/hrtimer.h>
 #include <linux/tick.h>
 #include <linux/ktime.h>
+#include <linux/sched.h>
 
 /*
  * dbs is used in this file as a shortform for demandbased switching
@@ -51,8 +52,20 @@ static unsigned int def_sampling_rate;
 			(MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
 #define MIN_SAMPLING_RATE			\
 			(def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
+/* Above MIN_SAMPLING_RATE will vanish with its sysfs file soon
+ * Define the minimal settable sampling rate to the greater of:
+ *  - "HW transition latency" * 100 (same as default sampling / 10)
+ *  - MIN_STAT_SAMPLING_RATE
+ * To avoid that userspace shoots itself.
+*/
+static unsigned int minimum_sampling_rate(void)
+{
+	return max(def_sampling_rate / 10, MIN_STAT_SAMPLING_RATE);
+}
+
+/* This will also vanish soon with removing sampling_rate_max */
 #define MAX_SAMPLING_RATE			(500 * def_sampling_rate)
-#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER	(1000)
+#define LATENCY_MULTIPLIER			(1000)
 #define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)
 
 static void do_dbs_timer(struct work_struct *work);
@@ -65,14 +78,14 @@ struct cpu_dbs_info_s {
 	cputime64_t prev_cpu_wall;
 	cputime64_t prev_cpu_nice;
 	struct cpufreq_policy *cur_policy;
 	struct delayed_work work;
 	struct cpufreq_frequency_table *freq_table;
 	unsigned int freq_lo;
 	unsigned int freq_lo_jiffies;
 	unsigned int freq_hi_jiffies;
 	int cpu;
 	unsigned int enable:1,
 		sample_type:1;
 };
 static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
@@ -203,12 +216,28 @@ static void ondemand_powersave_bias_init(void)
 /************************** sysfs interface ************************/
 static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
 {
-	return sprintf (buf, "%u\n", MAX_SAMPLING_RATE);
+	static int print_once;
+
+	if (!print_once) {
+		printk(KERN_INFO "CPUFREQ: ondemand sampling_rate_max "
+		       "sysfs file is deprecated - used by: %s\n",
+		       current->comm);
+		print_once = 1;
+	}
+	return sprintf(buf, "%u\n", MAX_SAMPLING_RATE);
 }
 
 static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
 {
-	return sprintf (buf, "%u\n", MIN_SAMPLING_RATE);
+	static int print_once;
+
+	if (!print_once) {
+		printk(KERN_INFO "CPUFREQ: ondemand sampling_rate_min "
+		       "sysfs file is deprecated - used by: %s\n",
+		       current->comm);
+		print_once = 1;
+	}
+	return sprintf(buf, "%u\n", MIN_SAMPLING_RATE);
 }
 
 #define define_one_ro(_name) \
@@ -238,13 +267,11 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
 	ret = sscanf(buf, "%u", &input);
 
 	mutex_lock(&dbs_mutex);
-	if (ret != 1 || input > MAX_SAMPLING_RATE
-			|| input < MIN_SAMPLING_RATE) {
+	if (ret != 1) {
 		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
 	}
-
-	dbs_tuners_ins.sampling_rate = input;
+	dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
 	mutex_unlock(&dbs_mutex);
 
 	return count;
@@ -279,14 +306,14 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	unsigned int j;
 
 	ret = sscanf(buf, "%u", &input);
-	if ( ret != 1 )
+	if (ret != 1)
 		return -EINVAL;
 
-	if ( input > 1 )
+	if (input > 1)
 		input = 1;
 
 	mutex_lock(&dbs_mutex);
-	if ( input == dbs_tuners_ins.ignore_nice ) { /* nothing to do */
+	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
 		mutex_unlock(&dbs_mutex);
 		return count;
 	}
@@ -337,7 +364,7 @@ define_one_rw(up_threshold);
 define_one_rw(ignore_nice_load);
 define_one_rw(powersave_bias);
 
-static struct attribute * dbs_attributes[] = {
+static struct attribute *dbs_attributes[] = {
 	&sampling_rate_max.attr,
 	&sampling_rate_min.attr,
 	&sampling_rate.attr,
@@ -512,8 +539,7 @@ static void do_dbs_timer(struct work_struct *work)
 		}
 	} else {
 		__cpufreq_driver_target(dbs_info->cur_policy,
-			dbs_info->freq_lo,
-			CPUFREQ_RELATION_H);
+			dbs_info->freq_lo, CPUFREQ_RELATION_H);
 	}
 	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
 	unlock_policy_rwsem_write(cpu);
@@ -530,7 +556,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
 	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
 	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
 	queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
 			      delay);
 }
 
 static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
@@ -591,11 +617,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		if (latency == 0)
 			latency = 1;
 
-		def_sampling_rate = latency *
-			DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
-
-		if (def_sampling_rate < MIN_STAT_SAMPLING_RATE)
-			def_sampling_rate = MIN_STAT_SAMPLING_RATE;
+		def_sampling_rate =
+			max(latency * LATENCY_MULTIPLIER,
+			    MIN_STAT_SAMPLING_RATE);
 
 		dbs_tuners_ins.sampling_rate = def_sampling_rate;
 	}
@@ -617,12 +641,10 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		mutex_lock(&dbs_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(this_dbs_info->cur_policy,
-				policy->max,
-				CPUFREQ_RELATION_H);
+				policy->max, CPUFREQ_RELATION_H);
 		else if (policy->min > this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(this_dbs_info->cur_policy,
-				policy->min,
-				CPUFREQ_RELATION_L);
+				policy->min, CPUFREQ_RELATION_L);
 		mutex_unlock(&dbs_mutex);
 		break;
 	}
@@ -677,7 +699,7 @@ static void __exit cpufreq_gov_dbs_exit(void)
 MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
 MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
 MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
 	"Low Latency Frequency Transition capable processors");
 MODULE_LICENSE("GPL");
 
 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND