path: root/drivers/cpufreq/cpufreq_ondemand.c
author    Linus Torvalds <torvalds@linux-foundation.org>  2013-09-03 18:59:39 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-09-03 18:59:39 -0400
commit    40031da445fb4d269af9c7c445b2adf674f171e7 (patch)
tree      021df7906708e939dee9978669a5461b12ff1296 /drivers/cpufreq/cpufreq_ondemand.c
parent    dcaaaeac871ff73043c616db3b2f91482637801d (diff)
parent    f41b83126cba53849dd2353476a7715613af648f (diff)
Merge tag 'pm+acpi-3.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull ACPI and power management updates from Rafael Wysocki:

 1) ACPI-based PCI hotplug (ACPIPHP) subsystem rework and introduction of Intel Thunderbolt support on systems that use ACPI for signalling Thunderbolt hotplug events. This also should make ACPIPHP work in some cases in which it was known to have problems. From Rafael J Wysocki, Mika Westerberg and Kirill A Shutemov.

 2) ACPI core code cleanups and dock station support cleanups from Jiang Liu and Rafael J Wysocki.

 3) Fixes for locking problems related to ACPI device hotplug from Rafael J Wysocki.

 4) ACPICA update to version 20130725 including fixes, cleanups, support for more than 256 GPEs per GPE block and a change to make the ACPI PM Timer optional (we've seen systems without the PM Timer in the field already). One of the fixes, related to the DeRefOf operator, is necessary to prevent some Windows 8 oriented AML from causing problems to happen. From Bob Moore, Lv Zheng, and Jung-uk Kim.

 5) Removal of the old and long deprecated /proc/acpi/event interface and related driver changes from Thomas Renninger.

 6) ACPI and Xen changes to make the reduced hardware sleep work with the latter from Ben Guthro.

 7) ACPI video driver cleanups and a blacklist of systems that should not tell the BIOS that they are compatible with Windows 8 (or ACPI backlight and possibly other things will not work on them). From Felipe Contreras.

 8) Assorted ACPI fixes and cleanups from Aaron Lu, Hanjun Guo, Kuppuswamy Sathyanarayanan, Lan Tianyu, Sachin Kamat, Tang Chen, Toshi Kani, and Wei Yongjun.

 9) cpufreq ondemand governor target frequency selection change to reduce oscillations between min and max frequencies (essentially, it causes the governor to choose target frequencies proportional to load) from Stratos Karafotis. A small illustrative sketch of that calculation follows this list.

10) cpufreq fixes allowing sysfs attribute file permissions to be preserved over suspend/resume cycles, from Srivatsa S Bhat.

11) Removal of Device Tree parsing for CPU device nodes from multiple cpufreq drivers, which required some changes related to of_get_cpu_node() to be made in a few architectures and in the driver core. From Sudeep KarkadaNagesha.

12) cpufreq core fixes and cleanups related to mutual exclusion and driver module references from Viresh Kumar, Lukasz Majewski and Rafael J Wysocki.

13) Assorted cpufreq fixes and cleanups from Amit Daniel Kachhap, Bartlomiej Zolnierkiewicz, Hanjun Guo, Jingoo Han, Joseph Lo, Julia Lawall, Li Zhong, Mark Brown, Sascha Hauer, Stephen Boyd, Stratos Karafotis, and Viresh Kumar.

14) Fixes to prevent race conditions in coupled cpuidle from Colin Cross.

15) cpuidle core fixes and cleanups from Daniel Lezcano and Tuukka Tikkanen.

16) Assorted cpuidle fixes and cleanups from Daniel Lezcano, Geert Uytterhoeven, Jingoo Han, Julia Lawall, Linus Walleij, and Sahara.

17) System sleep tracing changes from Todd E Brandt and Shuah Khan.

18) PNP subsystem conversion to using struct dev_pm_ops for power management from Shuah Khan.
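To make item 9 above concrete, the following is a small, self-contained userspace C sketch (not kernel code) of the new ondemand target-frequency rule, freq_next = load * policy->cpuinfo.max_freq / 100, which the diff below introduces in od_check_cpu(). The policy limits and load values are hypothetical; the 95% threshold matches MICRO_FREQUENCY_UP_THRESHOLD from the driver.

/* Illustrative sketch only; frequencies and loads are made up. */
#include <stdio.h>

static const unsigned int cpuinfo_max_freq = 2400000;	/* kHz, hypothetical */
static const unsigned int policy_min = 800000;		/* kHz, hypothetical */
static const unsigned int policy_max = 2400000;		/* kHz, hypothetical */
static const unsigned int up_threshold = 95;		/* MICRO_FREQUENCY_UP_THRESHOLD */

static unsigned int pick_target(unsigned int load)
{
	/* High load: request the maximum, as od_check_cpu() does */
	if (load > up_threshold)
		return policy_max;

	/* Otherwise scale the target with load (the new proportional rule) */
	unsigned int freq_next = load * cpuinfo_max_freq / 100;

	/* The cpufreq core clamps the request to the policy limits */
	if (freq_next < policy_min)
		freq_next = policy_min;
	if (freq_next > policy_max)
		freq_next = policy_max;
	return freq_next;
}

int main(void)
{
	static const unsigned int loads[] = { 10, 40, 70, 96 };

	for (unsigned int i = 0; i < sizeof(loads) / sizeof(loads[0]); i++)
		printf("load %3u%% -> target %u kHz\n", loads[i], pick_target(loads[i]));
	return 0;
}

With these made-up numbers the sketch requests 800000, 960000, 1680000 and 2400000 kHz for loads of 10%, 40%, 70% and 96% respectively, instead of jumping between extremes.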
* tag 'pm+acpi-3.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (217 commits)
  cpufreq: Don't use smp_processor_id() in preemptible context
  cpuidle: coupled: fix race condition between pokes and safe state
  cpuidle: coupled: abort idle if pokes are pending
  cpuidle: coupled: disable interrupts after entering safe state
  ACPI / hotplug: Remove containers synchronously
  driver core / ACPI: Avoid device hot remove locking issues
  cpufreq: governor: Fix typos in comments
  cpufreq: governors: Remove duplicate check of target freq in supported range
  cpufreq: Fix timer/workqueue corruption due to double queueing
  ACPI / EC: Add ASUSTEK L4R to quirk list in order to validate ECDT
  ACPI / thermal: Add check of "_TZD" availability and evaluating result
  cpufreq: imx6q: Fix clock enable balance
  ACPI: blacklist win8 OSI for buggy laptops
  cpufreq: tegra: fix the wrong clock name
  cpuidle: Change struct menu_device field types
  cpuidle: Add a comment warning about possible overflow
  cpuidle: Fix variable domains in get_typical_interval()
  cpuidle: Fix menu_device->intervals type
  cpuidle: CodingStyle: Break up multiple assignments on single line
  cpuidle: Check called function parameter in get_typical_interval()
  ...
Diffstat (limited to 'drivers/cpufreq/cpufreq_ondemand.c')
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c  66
1 file changed, 14 insertions(+), 52 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index c087347d6688..32f26f6e17c5 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -12,28 +12,16 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#include <linux/cpufreq.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/kernel_stat.h>
-#include <linux/kobject.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
+#include <linux/cpu.h>
 #include <linux/percpu-defs.h>
 #include <linux/slab.h>
-#include <linux/sysfs.h>
 #include <linux/tick.h>
-#include <linux/types.h>
-#include <linux/cpu.h>
-
 #include "cpufreq_governor.h"
 
 /* On-demand governor macros */
-#define DEF_FREQUENCY_DOWN_DIFFERENTIAL		(10)
 #define DEF_FREQUENCY_UP_THRESHOLD		(80)
 #define DEF_SAMPLING_DOWN_FACTOR		(1)
 #define MAX_SAMPLING_DOWN_FACTOR		(100000)
-#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL	(3)
 #define MICRO_FREQUENCY_UP_THRESHOLD		(95)
 #define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
 #define MIN_FREQUENCY_UP_THRESHOLD		(11)
@@ -144,31 +132,27 @@ static void ondemand_powersave_bias_init(void)
 	}
 }
 
-static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
+static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
 {
-	struct dbs_data *dbs_data = p->governor_data;
+	struct dbs_data *dbs_data = policy->governor_data;
 	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 
 	if (od_tuners->powersave_bias)
-		freq = od_ops.powersave_bias_target(p, freq,
+		freq = od_ops.powersave_bias_target(policy, freq,
 				CPUFREQ_RELATION_H);
-	else if (p->cur == p->max)
+	else if (policy->cur == policy->max)
 		return;
 
-	__cpufreq_driver_target(p, freq, od_tuners->powersave_bias ?
+	__cpufreq_driver_target(policy, freq, od_tuners->powersave_bias ?
 			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
 }
 
 /*
  * Every sampling_rate, we check, if current idle time is less than 20%
- * (default), then we try to increase frequency. Every sampling_rate, we look
- * for the lowest frequency which can sustain the load while keeping idle time
- * over 30%. If such a frequency exist, we try to decrease to this frequency.
- *
- * Any frequency increase takes it to the maximum frequency. Frequency reduction
- * happens at minimum steps of 5% (default) of current frequency
+ * (default), then we try to increase frequency. Else, we adjust the frequency
+ * proportional to load.
  */
-static void od_check_cpu(int cpu, unsigned int load_freq)
+static void od_check_cpu(int cpu, unsigned int load)
 {
 	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
 	struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
@@ -178,36 +162,21 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
 	dbs_info->freq_lo = 0;
 
 	/* Check for frequency increase */
-	if (load_freq > od_tuners->up_threshold * policy->cur) {
+	if (load > od_tuners->up_threshold) {
 		/* If switching to max speed, apply sampling_down_factor */
 		if (policy->cur < policy->max)
 			dbs_info->rate_mult =
 				od_tuners->sampling_down_factor;
 		dbs_freq_increase(policy, policy->max);
 		return;
-	}
-
-	/* Check for frequency decrease */
-	/* if we cannot reduce the frequency anymore, break out early */
-	if (policy->cur == policy->min)
-		return;
-
-	/*
-	 * The optimal frequency is the frequency that is the lowest that can
-	 * support the current CPU usage without triggering the up policy. To be
-	 * safe, we focus 10 points under the threshold.
-	 */
-	if (load_freq < od_tuners->adj_up_threshold
-			* policy->cur) {
+	} else {
+		/* Calculate the next frequency proportional to load */
 		unsigned int freq_next;
-		freq_next = load_freq / od_tuners->adj_up_threshold;
+		freq_next = load * policy->cpuinfo.max_freq / 100;
 
 		/* No longer fully busy, reset rate_mult */
 		dbs_info->rate_mult = 1;
 
-		if (freq_next < policy->min)
-			freq_next = policy->min;
-
 		if (!od_tuners->powersave_bias) {
 			__cpufreq_driver_target(policy, freq_next,
 					CPUFREQ_RELATION_L);
@@ -374,9 +343,6 @@ static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
 			input < MIN_FREQUENCY_UP_THRESHOLD) {
 		return -EINVAL;
 	}
-	/* Calculate the new adj_up_threshold */
-	od_tuners->adj_up_threshold += input;
-	od_tuners->adj_up_threshold -= od_tuners->up_threshold;
 
 	od_tuners->up_threshold = input;
 	return count;
@@ -513,7 +479,7 @@ static int od_init(struct dbs_data *dbs_data)
 	u64 idle_time;
 	int cpu;
 
-	tuners = kzalloc(sizeof(struct od_dbs_tuners), GFP_KERNEL);
+	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
 	if (!tuners) {
 		pr_err("%s: kzalloc failed\n", __func__);
 		return -ENOMEM;
@@ -525,8 +491,6 @@ static int od_init(struct dbs_data *dbs_data)
 	if (idle_time != -1ULL) {
 		/* Idle micro accounting is supported. Use finer thresholds */
 		tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
-		tuners->adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD -
-			MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
 		/*
 		 * In nohz/micro accounting case we set the minimum frequency
 		 * not depending on HZ, but fixed (very low). The deferred
@@ -535,8 +499,6 @@ static int od_init(struct dbs_data *dbs_data)
 		dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
 	} else {
 		tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
-		tuners->adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD -
-			DEF_FREQUENCY_DOWN_DIFFERENTIAL;
 
 		/* For correct statistics, we need 10 ticks for each measure */
 		dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
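For context, the following is a rough before/after sketch (userspace C, all numbers hypothetical) of the target-frequency calculation removed and added above. In the old code, od_check_cpu() received load_freq, which was roughly load * policy->cur, and on moderate load chose freq_next = load_freq / adj_up_threshold; the new code chooses load * policy->cpuinfo.max_freq / 100, so the target no longer depends on the frequency the CPU already happens to be running at.

/* Rough before/after comparison; not kernel code, numbers are made up. */
#include <stdio.h>

int main(void)
{
	const unsigned int cur = 1600000;		/* current frequency, kHz */
	const unsigned int cpuinfo_max = 2400000;	/* hardware maximum, kHz */
	const unsigned int adj_up_threshold = 95 - 3;	/* MICRO_* values from the diff */
	const unsigned int load = 50;			/* percent */

	unsigned int old_next = load * cur / adj_up_threshold;
	unsigned int new_next = load * cpuinfo_max / 100;

	printf("old ondemand: ~%u kHz, new ondemand: %u kHz\n", old_next, new_next);
	return 0;
}

Because the old target tracked policy->cur, the same load percentage produced different requests depending on the current frequency; the new rule maps a given load to a single target, which is the behaviour item 9 of the merge description credits with reducing oscillation between the minimum and maximum frequencies.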