author     Rafael J. Wysocki <rafael.j.wysocki@intel.com>   2014-03-06 07:25:59 -0500
committer  Rafael J. Wysocki <rafael.j.wysocki@intel.com>   2014-03-06 07:25:59 -0500
commit     3b4aff047275574bff8f50a654d51166d9574079 (patch)
tree       aaf351cb9536cfc5c4e87a480c7198713c39e39e /drivers/cpufreq
parent     4e97b631f24c927b2302368f4f83efbba82076ee (diff)
parent     ad4c2302c20a6906eb2f10defdb0e982bab5eb0b (diff)
Merge back earlier 'pm-cpufreq' material.
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--  drivers/cpufreq/Kconfig            2
-rw-r--r--  drivers/cpufreq/cpufreq.c        107
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c   38
-rw-r--r--  drivers/cpufreq/intel_pstate.c    26
4 files changed, 76 insertions(+), 97 deletions(-)
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 4b029c0944af..1fbe11f2a146 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -200,7 +200,7 @@ source "drivers/cpufreq/Kconfig.x86"
 endmenu
 
 menu "ARM CPU frequency scaling drivers"
-        depends on ARM
+        depends on ARM || ARM64
         source "drivers/cpufreq/Kconfig.arm"
 endmenu
 
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index cf485d928903..df95c039a21c 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -352,7 +352,7 @@ EXPORT_SYMBOL_GPL(cpufreq_notify_post_transition);
 /*********************************************************************
  *                        SYSFS INTERFACE                            *
  *********************************************************************/
-ssize_t show_boost(struct kobject *kobj,
+static ssize_t show_boost(struct kobject *kobj,
                    struct attribute *attr, char *buf)
 {
         return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
@@ -2012,22 +2012,21 @@ EXPORT_SYMBOL(cpufreq_get_policy);
 static int cpufreq_set_policy(struct cpufreq_policy *policy,
                               struct cpufreq_policy *new_policy)
 {
-        int ret = 0, failed = 1;
+        struct cpufreq_governor *old_gov;
+        int ret;
 
         pr_debug("setting new policy for CPU %u: %u - %u kHz\n", new_policy->cpu,
                  new_policy->min, new_policy->max);
 
         memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
 
-        if (new_policy->min > policy->max || new_policy->max < policy->min) {
-                ret = -EINVAL;
-                goto error_out;
-        }
+        if (new_policy->min > policy->max || new_policy->max < policy->min)
+                return -EINVAL;
 
         /* verify the cpu speed can be set within this limit */
         ret = cpufreq_driver->verify(new_policy);
         if (ret)
-                goto error_out;
+                return ret;
 
         /* adjust if necessary - all reasons */
         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
@@ -2058,58 +2057,48 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
         if (cpufreq_driver->setpolicy) {
                 policy->policy = new_policy->policy;
                 pr_debug("setting range\n");
-                ret = cpufreq_driver->setpolicy(new_policy);
-        } else {
-                if (new_policy->governor != policy->governor) {
-                        /* save old, working values */
-                        struct cpufreq_governor *old_gov = policy->governor;
-
-                        pr_debug("governor switch\n");
-
-                        /* end old governor */
-                        if (policy->governor) {
-                                __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
-                                up_write(&policy->rwsem);
-                                __cpufreq_governor(policy,
-                                                CPUFREQ_GOV_POLICY_EXIT);
-                                down_write(&policy->rwsem);
-                        }
+                return cpufreq_driver->setpolicy(new_policy);
+        }
 
-                        /* start new governor */
-                        policy->governor = new_policy->governor;
-                        if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
-                                if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
-                                        failed = 0;
-                                } else {
-                                        up_write(&policy->rwsem);
-                                        __cpufreq_governor(policy,
-                                                        CPUFREQ_GOV_POLICY_EXIT);
-                                        down_write(&policy->rwsem);
-                                }
-                        }
+        if (new_policy->governor == policy->governor)
+                goto out;
 
-                        if (failed) {
-                                /* new governor failed, so re-start old one */
-                                pr_debug("starting governor %s failed\n",
-                                                        policy->governor->name);
-                                if (old_gov) {
-                                        policy->governor = old_gov;
-                                        __cpufreq_governor(policy,
-                                                        CPUFREQ_GOV_POLICY_INIT);
-                                        __cpufreq_governor(policy,
-                                                        CPUFREQ_GOV_START);
-                                }
-                                ret = -EINVAL;
-                                goto error_out;
-                        }
-                        /* might be a policy change, too, so fall through */
-                }
-                pr_debug("governor: change or update limits\n");
-                ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+        pr_debug("governor switch\n");
+
+        /* save old, working values */
+        old_gov = policy->governor;
+        /* end old governor */
+        if (old_gov) {
+                __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+                up_write(&policy->rwsem);
+                __cpufreq_governor(policy,CPUFREQ_GOV_POLICY_EXIT);
+                down_write(&policy->rwsem);
         }
 
-error_out:
-        return ret;
+        /* start new governor */
+        policy->governor = new_policy->governor;
+        if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
+                if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
+                        goto out;
+
+                up_write(&policy->rwsem);
+                __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+                down_write(&policy->rwsem);
+        }
+
+        /* new governor failed, so re-start old one */
+        pr_debug("starting governor %s failed\n", policy->governor->name);
+        if (old_gov) {
+                policy->governor = old_gov;
+                __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
+                __cpufreq_governor(policy, CPUFREQ_GOV_START);
+        }
+
+        return -EINVAL;
+
+out:
+        pr_debug("governor: change or update limits\n");
+        return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
 }
 
 /**
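The rewritten cpufreq_set_policy() above replaces the nested "failed" flag with early returns for ->setpolicy() drivers and a single fall-back path for governor switches: stop and exit the old governor, try to init and start the new one, and if that fails re-init and restart the old governor before returning -EINVAL (the kernel code also drops policy->rwsem around CPUFREQ_GOV_POLICY_EXIT, which is left out below). A minimal userspace sketch of the same try-then-roll-back pattern, using a hypothetical governor type rather than the kernel API:

    #include <stdio.h>

    /* Hypothetical stand-ins for the kernel's governor callbacks;
     * each int callback returns 0 on success, like __cpufreq_governor(). */
    struct governor {
        const char *name;
        int (*init)(void);      /* CPUFREQ_GOV_POLICY_INIT */
        int (*start)(void);     /* CPUFREQ_GOV_START */
        void (*stop)(void);     /* CPUFREQ_GOV_STOP */
        void (*exit)(void);     /* CPUFREQ_GOV_POLICY_EXIT */
    };

    /* Switch *cur to new_gov, rolling back to the old governor on failure. */
    static int switch_governor(struct governor **cur, struct governor *new_gov)
    {
        struct governor *old_gov = *cur;

        if (new_gov == old_gov)
            return 0;                   /* nothing to switch, just update limits */

        if (old_gov) {                  /* end old governor */
            old_gov->stop();
            old_gov->exit();
        }

        *cur = new_gov;                 /* start new governor */
        if (!new_gov->init()) {
            if (!new_gov->start())
                return 0;
            new_gov->exit();            /* start failed: undo the init */
        }

        /* new governor failed, so re-start the old one */
        fprintf(stderr, "starting governor %s failed\n", new_gov->name);
        if (old_gov) {
            *cur = old_gov;
            old_gov->init();
            old_gov->start();
        }
        return -1;
    }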
@@ -2145,6 +2134,11 @@ int cpufreq_update_policy(unsigned int cpu)
          */
         if (cpufreq_driver->get) {
                 new_policy.cur = cpufreq_driver->get(cpu);
+                if (WARN_ON(!new_policy.cur)) {
+                        ret = -EIO;
+                        goto no_policy;
+                }
+
                 if (!policy->cur) {
                         pr_debug("Driver did not initialize current freq");
                         policy->cur = new_policy.cur;
@@ -2181,7 +2175,6 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
         switch (action & ~CPU_TASKS_FROZEN) {
         case CPU_ONLINE:
                 __cpufreq_add_dev(dev, NULL, frozen);
-                cpufreq_update_policy(cpu);
                 break;
 
         case CPU_DOWN_PREPARE:
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 5793e1447fb1..eb214d83ad6b 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -180,27 +180,25 @@ static void cpufreq_stats_free_table(unsigned int cpu)
         cpufreq_cpu_put(policy);
 }
 
-static int __cpufreq_stats_create_table(struct cpufreq_policy *policy,
-                struct cpufreq_frequency_table *table)
+static int __cpufreq_stats_create_table(struct cpufreq_policy *policy)
 {
         unsigned int i, j, count = 0, ret = 0;
         struct cpufreq_stats *stat;
-        struct cpufreq_policy *current_policy;
         unsigned int alloc_size;
         unsigned int cpu = policy->cpu;
+        struct cpufreq_frequency_table *table;
+
+        table = cpufreq_frequency_get_table(cpu);
+        if (unlikely(!table))
+                return 0;
+
         if (per_cpu(cpufreq_stats_table, cpu))
                 return -EBUSY;
         stat = kzalloc(sizeof(*stat), GFP_KERNEL);
         if ((stat) == NULL)
                 return -ENOMEM;
 
-        current_policy = cpufreq_cpu_get(cpu);
-        if (current_policy == NULL) {
-                ret = -EINVAL;
-                goto error_get_fail;
-        }
-
-        ret = sysfs_create_group(&current_policy->kobj, &stats_attr_group);
+        ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
         if (ret)
                 goto error_out;
 
@@ -223,7 +221,7 @@ static int __cpufreq_stats_create_table(struct cpufreq_policy *policy,
         stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
         if (!stat->time_in_state) {
                 ret = -ENOMEM;
-                goto error_out;
+                goto error_alloc;
         }
         stat->freq_table = (unsigned int *)(stat->time_in_state + count);
 
@@ -243,11 +241,10 @@ static int __cpufreq_stats_create_table(struct cpufreq_policy *policy,
         stat->last_time = get_jiffies_64();
         stat->last_index = freq_table_get_index(stat, policy->cur);
         spin_unlock(&cpufreq_stats_lock);
-        cpufreq_cpu_put(current_policy);
         return 0;
+error_alloc:
+        sysfs_remove_group(&policy->kobj, &stats_attr_group);
 error_out:
-        cpufreq_cpu_put(current_policy);
-error_get_fail:
         kfree(stat);
         per_cpu(cpufreq_stats_table, cpu) = NULL;
         return ret;
@@ -256,7 +253,6 @@ error_get_fail:
 static void cpufreq_stats_create_table(unsigned int cpu)
 {
         struct cpufreq_policy *policy;
-        struct cpufreq_frequency_table *table;
 
         /*
          * "likely(!policy)" because normally cpufreq_stats will be registered
@@ -266,9 +262,7 @@ static void cpufreq_stats_create_table(unsigned int cpu)
         if (likely(!policy))
                 return;
 
-        table = cpufreq_frequency_get_table(policy->cpu);
-        if (likely(table))
-                __cpufreq_stats_create_table(policy, table);
+        __cpufreq_stats_create_table(policy);
 
         cpufreq_cpu_put(policy);
 }
@@ -291,20 +285,14 @@ static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
 {
         int ret = 0;
         struct cpufreq_policy *policy = data;
-        struct cpufreq_frequency_table *table;
-        unsigned int cpu = policy->cpu;
 
         if (val == CPUFREQ_UPDATE_POLICY_CPU) {
                 cpufreq_stats_update_policy_cpu(policy);
                 return 0;
         }
 
-        table = cpufreq_frequency_get_table(cpu);
-        if (!table)
-                return 0;
-
         if (val == CPUFREQ_CREATE_POLICY)
-                ret = __cpufreq_stats_create_table(policy, table);
+                ret = __cpufreq_stats_create_table(policy);
         else if (val == CPUFREQ_REMOVE_POLICY)
                 __cpufreq_stats_free_table(policy);
 
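The stats table creation above now owns the whole sequence: it looks up the frequency table itself, creates the sysfs group against the policy it was handed (no extra cpufreq_cpu_get()), and unwinds in reverse order on failure via the new error_alloc label. A compact userspace sketch of that cascading-unwind shape, with hypothetical stand-ins for the sysfs and allocation calls:

    #include <errno.h>
    #include <stdlib.h>

    /* Hypothetical stand-ins for sysfs_create_group()/sysfs_remove_group(). */
    static int  create_group(void) { return 0; }
    static void remove_group(void) { }

    static int create_table(unsigned int count)
    {
        unsigned long long *time_in_state;
        void *stat;
        int ret;

        stat = calloc(1, 128);              /* like kzalloc(sizeof(*stat)) */
        if (!stat)
            return -ENOMEM;

        ret = create_group();               /* sysfs group is created first */
        if (ret)
            goto error_out;

        time_in_state = calloc(count, sizeof(*time_in_state));
        if (!time_in_state) {
            ret = -ENOMEM;
            goto error_alloc;               /* group exists, so undo it first */
        }

        /* ... the real code keeps stat and time_in_state for later use ... */
        return 0;

    error_alloc:
        remove_group();                     /* reverse order: the group ... */
    error_out:
        free(stat);                         /* ... then the allocation */
        return ret;
    }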
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 2cd36b9297f3..9ab109c0f90c 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -99,8 +99,7 @@ struct cpudata {
         u64 prev_aperf;
         u64 prev_mperf;
         unsigned long long prev_tsc;
-        int sample_ptr;
-        struct sample samples[SAMPLE_COUNT];
+        struct sample sample;
 };
 
 static struct cpudata **all_cpu_data;
@@ -154,7 +153,7 @@ static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
         pid->setpoint = setpoint;
         pid->deadband = deadband;
         pid->integral = int_tofp(integral);
-        pid->last_err = setpoint - busy;
+        pid->last_err = int_tofp(setpoint) - int_tofp(busy);
 }
 
 static inline void pid_p_gain_set(struct _pid *pid, int percent)
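pid_reset() seeds last_err, which is later used alongside error values that are already in the driver's fixed-point representation; storing the raw setpoint - busy difference therefore left it smaller by a factor of 2^FRAC_BITS, and converting both operands with int_tofp() first puts it on the same scale. A standalone illustration of that convention, with helpers in the style of intel_pstate's int_tofp()/fp_toint() and 8 fractional bits assumed purely for the example:

    #include <stdint.h>
    #include <stdio.h>

    /* Fixed-point helpers modelled on intel_pstate's macros; FRAC_BITS is
     * assumed to be 8 here for illustration. */
    #define FRAC_BITS 8
    #define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
    #define fp_toint(X) ((X) >> FRAC_BITS)

    int main(void)
    {
        int setpoint = 97, busy = 80;       /* example values */

        /* Raw difference: an ordinary integer, not on the fixed-point scale. */
        int64_t raw = setpoint - busy;

        /* Convert first, then subtract: same quantity, fixed-point scale. */
        int64_t fp = int_tofp(setpoint) - int_tofp(busy);

        printf("raw=%lld fixed-point=%lld (back to int: %lld)\n",
               (long long)raw, (long long)fp, (long long)fp_toint(fp));
        return 0;
    }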
@@ -586,15 +585,14 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
         mperf = mperf >> FRAC_BITS;
         tsc = tsc >> FRAC_BITS;
 
-        cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
-        cpu->samples[cpu->sample_ptr].aperf = aperf;
-        cpu->samples[cpu->sample_ptr].mperf = mperf;
-        cpu->samples[cpu->sample_ptr].tsc = tsc;
-        cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
-        cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
-        cpu->samples[cpu->sample_ptr].tsc -= cpu->prev_tsc;
+        cpu->sample.aperf = aperf;
+        cpu->sample.mperf = mperf;
+        cpu->sample.tsc = tsc;
+        cpu->sample.aperf -= cpu->prev_aperf;
+        cpu->sample.mperf -= cpu->prev_mperf;
+        cpu->sample.tsc -= cpu->prev_tsc;
 
-        intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
+        intel_pstate_calc_busy(cpu, &cpu->sample);
 
         cpu->prev_aperf = aperf;
         cpu->prev_mperf = mperf;
@@ -614,7 +612,7 @@ static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
 {
         int32_t core_busy, max_pstate, current_pstate;
 
-        core_busy = cpu->samples[cpu->sample_ptr].core_pct_busy;
+        core_busy = cpu->sample.core_pct_busy;
         max_pstate = int_tofp(cpu->pstate.max_pstate);
         current_pstate = int_tofp(cpu->pstate.current_pstate);
         core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
@@ -648,7 +646,7 @@ static void intel_pstate_timer_func(unsigned long __data)
 
         intel_pstate_sample(cpu);
 
-        sample = &cpu->samples[cpu->sample_ptr];
+        sample = &cpu->sample;
 
         intel_pstate_adjust_busy_pstate(cpu);
 
@@ -729,7 +727,7 @@ static unsigned int intel_pstate_get(unsigned int cpu_num)
         cpu = all_cpu_data[cpu_num];
         if (!cpu)
                 return 0;
-        sample = &cpu->samples[cpu->sample_ptr];
+        sample = &cpu->sample;
         return sample->freq;
 }
 
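With the SAMPLE_COUNT ring buffer gone, the timer path, intel_pstate_get_scaled_busy() and intel_pstate_get() all read the single most recent measurement from cpu->sample instead of indexing samples[sample_ptr]; only the bookkeeping changes, while the busy scaling itself remains core_pct_busy multiplied by max_pstate / current_pstate in fixed point. A small model of that calculation with made-up numbers, reusing the fixed-point conventions sketched above:

    #include <stdint.h>
    #include <stdio.h>

    #define FRAC_BITS 8
    #define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
    #define fp_toint(X) ((X) >> FRAC_BITS)

    /* Fixed-point multiply/divide in the style of intel_pstate's helpers. */
    static int32_t mul_fp(int32_t x, int32_t y)
    {
        return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
    }

    static int32_t div_fp(int32_t x, int32_t y)
    {
        return ((int64_t)x << FRAC_BITS) / y;
    }

    int main(void)
    {
        /* Made-up latest sample: the core was measured 60% busy. */
        int32_t core_pct_busy = int_tofp(60);
        int32_t max_pstate = int_tofp(34);          /* example P-state numbers */
        int32_t current_pstate = int_tofp(20);

        /* Same expression as intel_pstate_get_scaled_busy(): rescale the
         * measured busy value by max_pstate / current_pstate. */
        int32_t scaled = mul_fp(core_pct_busy,
                                div_fp(max_pstate, current_pstate));

        printf("scaled busy = %lld\n", (long long)fp_toint(scaled));
        return 0;
    }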