 drivers/cpufreq/cpufreq_conservative.c | 121
 1 file changed, 60 insertions(+), 61 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 57d02e990af3..1bba99747f5b 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -37,17 +37,17 @@
 #define DEF_FREQUENCY_UP_THRESHOLD		(80)
 #define DEF_FREQUENCY_DOWN_THRESHOLD		(20)
 
 /*
  * The polling frequency of this governor depends on the capability of
  * the processor. Default polling frequency is 1000 times the transition
  * latency of the processor. The governor will work on any processor with
  * transition latency <= 10mS, using appropriate sampling
  * rate.
  * For CPUs with transition latency > 10mS (mostly drivers
  * with CPUFREQ_ETERNAL), this governor will not work.
  * All times here are in uS.
  */
 static unsigned int def_sampling_rate;
 #define MIN_SAMPLING_RATE_RATIO		(2)
 /* for correct statistics, we need at least 10 ticks between each measure */
 #define MIN_STAT_SAMPLING_RATE \
@@ -63,12 +63,12 @@ static unsigned int def_sampling_rate;
 static void do_dbs_timer(struct work_struct *work);
 
 struct cpu_dbs_info_s {
 	struct cpufreq_policy *cur_policy;
 	unsigned int prev_cpu_idle_up;
 	unsigned int prev_cpu_idle_down;
 	unsigned int enable;
 	unsigned int down_skip;
 	unsigned int requested_freq;
 };
 static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
@@ -82,24 +82,24 @@ static unsigned int dbs_enable;	/* number of CPUs using this policy */
  * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
  * is recursive for the same process. -Venki
  */
 static DEFINE_MUTEX (dbs_mutex);
 static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer);
 
 struct dbs_tuners {
 	unsigned int sampling_rate;
 	unsigned int sampling_down_factor;
 	unsigned int up_threshold;
 	unsigned int down_threshold;
 	unsigned int ignore_nice;
 	unsigned int freq_step;
 };
 
 static struct dbs_tuners dbs_tuners_ins = {
 	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
 	.down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
 	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
 	.ignore_nice = 0,
 	.freq_step = 5,
 };
 
 static inline unsigned int get_cpu_idle_time(unsigned int cpu)
@@ -109,7 +109,7 @@ static inline unsigned int get_cpu_idle_time(unsigned int cpu)
 	if (dbs_tuners_ins.ignore_nice)
 		add_nice = kstat_cpu(cpu).cpustat.nice;
 
 	ret = kstat_cpu(cpu).cpustat.idle +
 		kstat_cpu(cpu).cpustat.iowait +
 		add_nice;
 
@@ -148,8 +148,8 @@ static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
 	return sprintf (buf, "%u\n", MIN_SAMPLING_RATE);
 }
 
 #define define_one_ro(_name)		\
 static struct freq_attr _name =		\
 __ATTR(_name, 0444, show_##_name, NULL)
 
 define_one_ro(sampling_rate_max);
@@ -169,7 +169,7 @@ show_one(down_threshold, down_threshold);
 show_one(ignore_nice_load, ignore_nice);
 show_one(freq_step, freq_step);
 
 static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
 		const char *buf, size_t count)
 {
 	unsigned int input;
@@ -185,7 +185,7 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
 	return count;
 }
 
 static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
 		const char *buf, size_t count)
 {
 	unsigned int input;
@@ -204,7 +204,7 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
 	return count;
 }
 
 static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 		const char *buf, size_t count)
 {
 	unsigned int input;
@@ -223,7 +223,7 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 	return count;
 }
 
 static ssize_t store_down_threshold(struct cpufreq_policy *unused,
 		const char *buf, size_t count)
 {
 	unsigned int input;
@@ -249,16 +249,16 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	int ret;
 
 	unsigned int j;
 
-	ret = sscanf (buf, "%u", &input);
-	if ( ret != 1 )
+	ret = sscanf(buf, "%u", &input);
+	if (ret != 1)
 		return -EINVAL;
 
-	if ( input > 1 )
+	if (input > 1)
 		input = 1;
 
 	mutex_lock(&dbs_mutex);
-	if ( input == dbs_tuners_ins.ignore_nice ) { /* nothing to do */
+	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
 		mutex_unlock(&dbs_mutex);
 		return count;
 	}
@@ -282,14 +282,14 @@ static ssize_t store_freq_step(struct cpufreq_policy *policy,
 	unsigned int input;
 	int ret;
 
-	ret = sscanf (buf, "%u", &input);
+	ret = sscanf(buf, "%u", &input);
 
-	if ( ret != 1 )
+	if (ret != 1)
 		return -EINVAL;
 
-	if ( input > 100 )
+	if (input > 100)
 		input = 100;
 
 	/* no need to test here if freq_step is zero as the user might actually
 	 * want this, they would be crazy though :) */
 	mutex_lock(&dbs_mutex);
@@ -343,18 +343,18 @@ static void dbs_check_cpu(int cpu)
 
 	policy = this_dbs_info->cur_policy;
 
 	/*
 	 * The default safe range is 20% to 80%
 	 * Every sampling_rate, we check
 	 *	- If current idle time is less than 20%, then we try to
 	 *	  increase frequency
 	 * Every sampling_rate*sampling_down_factor, we check
 	 *	- If current idle time is more than 80%, then we try to
 	 *	  decrease frequency
 	 *
 	 * Any frequency increase takes it to the maximum frequency.
 	 * Frequency reduction happens at minimum steps of
 	 * 5% (default) of max_frequency
 	 */
 
 	/* Check for frequency increase */
@@ -382,13 +382,13 @@ static void dbs_check_cpu(int cpu)
 	/* if we are already at full speed then break out early */
 	if (this_dbs_info->requested_freq == policy->max)
 		return;
 
 	freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100;
 
 	/* max freq cannot be less than 100. But who knows.... */
 	if (unlikely(freq_step == 0))
 		freq_step = 5;
 
 	this_dbs_info->requested_freq += freq_step;
 	if (this_dbs_info->requested_freq > policy->max)
 		this_dbs_info->requested_freq = policy->max;
@@ -448,15 +448,15 @@ static void dbs_check_cpu(int cpu)
 }
 
 static void do_dbs_timer(struct work_struct *work)
 {
 	int i;
 	mutex_lock(&dbs_mutex);
 	for_each_online_cpu(i)
 		dbs_check_cpu(i);
 	schedule_delayed_work(&dbs_work,
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 	mutex_unlock(&dbs_mutex);
 }
 
 static inline void dbs_timer_init(void)
 {
@@ -483,13 +483,12 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 	switch (event) {
 	case CPUFREQ_GOV_START:
-		if ((!cpu_online(cpu)) ||
-				(!policy->cur))
+		if ((!cpu_online(cpu)) || (!policy->cur))
 			return -EINVAL;
 
 		if (this_dbs_info->enable) /* Already enabled */
 			break;
 
 		mutex_lock(&dbs_mutex);
 
 		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
@@ -502,7 +501,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			struct cpu_dbs_info_s *j_dbs_info;
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
 
 			j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(cpu);
 			j_dbs_info->prev_cpu_idle_down
 				= j_dbs_info->prev_cpu_idle_up;
@@ -536,7 +535,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 					&dbs_cpufreq_notifier_block,
 					CPUFREQ_TRANSITION_NOTIFIER);
 		}
 
 		mutex_unlock(&dbs_mutex);
 		break;
 
@@ -565,11 +564,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(
 					this_dbs_info->cur_policy,
 					policy->max, CPUFREQ_RELATION_H);
 		else if (policy->min > this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(
 					this_dbs_info->cur_policy,
 					policy->min, CPUFREQ_RELATION_L);
 		mutex_unlock(&dbs_mutex);
 		break;
 	}
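
The hunks above are formatting-only, but the comment block in dbs_check_cpu() summarizes the governor's core rule: keep CPU load inside the 20%-80% band, moving requested_freq in steps of freq_step percent of the maximum frequency. The standalone program below is a minimal sketch of that rule, assuming a simplified model: no per-CPU state, no dbs_mutex locking, no jiffy-based idle accounting, and conservative_step()/busy_pct are invented names for illustration, not kernel symbols.

/*
 * Hypothetical, simplified model of the conservative governor's
 * decision rule, for illustration only.  Assumes the caller has
 * already computed the CPU's busy percentage over the last
 * sampling interval; the real kernel derives this from
 * kstat_cpu() counters inside dbs_check_cpu().
 */
#include <stdio.h>

struct tuners {
	unsigned int up_threshold;	/* default 80: busy > 80% => step up */
	unsigned int down_threshold;	/* default 20: busy < 20% => step down */
	unsigned int freq_step;		/* percent of max freq per step, default 5 */
};

static unsigned int conservative_step(unsigned int busy_pct,
				      unsigned int requested_freq,
				      unsigned int min_freq,
				      unsigned int max_freq,
				      const struct tuners *t)
{
	unsigned int step = (t->freq_step * max_freq) / 100;

	/* mirror the kernel's guard against a zero-sized step */
	if (step == 0)
		step = 5;

	if (busy_pct > t->up_threshold) {
		/* load too high: move one step toward max, clamped */
		requested_freq += step;
		if (requested_freq > max_freq)
			requested_freq = max_freq;
	} else if (busy_pct < t->down_threshold) {
		/* load low: move one step toward min, avoiding underflow */
		requested_freq = (requested_freq > min_freq + step)
				 ? requested_freq - step : min_freq;
	}
	return requested_freq;
}

int main(void)
{
	struct tuners t = { .up_threshold = 80, .down_threshold = 20,
			    .freq_step = 5 };
	unsigned int freq = 1000000;	/* kHz */

	/* one busy sample followed by two idle ones */
	freq = conservative_step(90, freq, 800000, 2000000, &t);
	freq = conservative_step(10, freq, 800000, 2000000, &t);
	freq = conservative_step(10, freq, 800000, 2000000, &t);
	printf("requested_freq = %u kHz\n", freq);	/* 900000 */
	return 0;
}

In the real governor, the resulting requested_freq is handed to __cpufreq_driver_target() with CPUFREQ_RELATION_H or CPUFREQ_RELATION_L, and the downward path is additionally rate-limited by sampling_down_factor.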
