diff options
| author | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2015-11-06 19:30:49 -0500 |
|---|---|---|
| committer | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2015-11-06 19:30:49 -0500 |
| commit | 1f47b0ddf3ebebe632b96b29a0033505d5adfe8b (patch) | |
| tree | a53aa919666ceadd49c4b7e76c22b234338421fa /drivers/cpufreq | |
| parent | 1ede53f73171722fcf1b5be63bcd64e42d14affc (diff) | |
| parent | d7e53e35f9f54cdfa09a8456ae8e9874ec66bb36 (diff) | |
Merge branch 'pm-cpufreq'
* pm-cpufreq:
cpufreq: s5pv210-cpufreq: fix wrong do_div() usage
MAINTAINERS: update for intel P-state driver
cpufreq: governor: Quit work-handlers early if governor is stopped
intel_pstate: decrease number of "HWP enabled" messages
cpufreq: arm_big_little: fix frequency check when bL switcher is active
Diffstat (limited to 'drivers/cpufreq')
| -rw-r--r-- | drivers/cpufreq/arm_big_little.c | 22 | ||||
| -rw-r--r-- | drivers/cpufreq/cpufreq_governor.c | 33 | ||||
| -rw-r--r-- | drivers/cpufreq/intel_pstate.c | 10 | ||||
| -rw-r--r-- | drivers/cpufreq/s5pv210-cpufreq.c | 4 |
4 files changed, 44 insertions(+), 25 deletions(-)
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c index f1e42f8ce0fc..c5d256caa664 100644 --- a/drivers/cpufreq/arm_big_little.c +++ b/drivers/cpufreq/arm_big_little.c | |||
| @@ -149,6 +149,19 @@ bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate) | |||
| 149 | __func__, cpu, old_cluster, new_cluster, new_rate); | 149 | __func__, cpu, old_cluster, new_cluster, new_rate); |
| 150 | 150 | ||
| 151 | ret = clk_set_rate(clk[new_cluster], new_rate * 1000); | 151 | ret = clk_set_rate(clk[new_cluster], new_rate * 1000); |
| 152 | if (!ret) { | ||
| 153 | /* | ||
| 154 | * FIXME: clk_set_rate hasn't returned an error here however it | ||
| 155 | * may be that clk_change_rate failed due to hardware or | ||
| 156 | * firmware issues and wasn't able to report that due to the | ||
| 157 | * current design of the clk core layer. To work around this | ||
| 158 | * problem we will read back the clock rate and check it is | ||
| 159 | * correct. This needs to be removed once clk core is fixed. | ||
| 160 | */ | ||
| 161 | if (clk_get_rate(clk[new_cluster]) != new_rate * 1000) | ||
| 162 | ret = -EIO; | ||
| 163 | } | ||
| 164 | |||
| 152 | if (WARN_ON(ret)) { | 165 | if (WARN_ON(ret)) { |
| 153 | pr_err("clk_set_rate failed: %d, new cluster: %d\n", ret, | 166 | pr_err("clk_set_rate failed: %d, new cluster: %d\n", ret, |
| 154 | new_cluster); | 167 | new_cluster); |
| @@ -189,15 +202,6 @@ bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate) | |||
| 189 | mutex_unlock(&cluster_lock[old_cluster]); | 202 | mutex_unlock(&cluster_lock[old_cluster]); |
| 190 | } | 203 | } |
| 191 | 204 | ||
| 192 | /* | ||
| 193 | * FIXME: clk_set_rate has to handle the case where clk_change_rate | ||
| 194 | * can fail due to hardware or firmware issues. Until the clk core | ||
| 195 | * layer is fixed, we can check here. In most of the cases we will | ||
| 196 | * be reading only the cached value anyway. This needs to be removed | ||
| 197 | * once clk core is fixed. | ||
| 198 | */ | ||
| 199 | if (bL_cpufreq_get_rate(cpu) != new_rate) | ||
| 200 | return -EIO; | ||
| 201 | return 0; | 205 | return 0; |
| 202 | } | 206 | } |
| 203 | 207 | ||
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 11258c4c1b17..b260576ddb12 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c | |||
| @@ -171,10 +171,6 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy, | |||
| 171 | { | 171 | { |
| 172 | int i; | 172 | int i; |
| 173 | 173 | ||
| 174 | mutex_lock(&cpufreq_governor_lock); | ||
| 175 | if (!policy->governor_enabled) | ||
| 176 | goto out_unlock; | ||
| 177 | |||
| 178 | if (!all_cpus) { | 174 | if (!all_cpus) { |
| 179 | /* | 175 | /* |
| 180 | * Use raw_smp_processor_id() to avoid preemptible warnings. | 176 | * Use raw_smp_processor_id() to avoid preemptible warnings. |
| @@ -188,9 +184,6 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy, | |||
| 188 | for_each_cpu(i, policy->cpus) | 184 | for_each_cpu(i, policy->cpus) |
| 189 | __gov_queue_work(i, dbs_data, delay); | 185 | __gov_queue_work(i, dbs_data, delay); |
| 190 | } | 186 | } |
| 191 | |||
| 192 | out_unlock: | ||
| 193 | mutex_unlock(&cpufreq_governor_lock); | ||
| 194 | } | 187 | } |
| 195 | EXPORT_SYMBOL_GPL(gov_queue_work); | 188 | EXPORT_SYMBOL_GPL(gov_queue_work); |
| 196 | 189 | ||
| @@ -229,13 +222,24 @@ static void dbs_timer(struct work_struct *work) | |||
| 229 | struct cpu_dbs_info *cdbs = container_of(work, struct cpu_dbs_info, | 222 | struct cpu_dbs_info *cdbs = container_of(work, struct cpu_dbs_info, |
| 230 | dwork.work); | 223 | dwork.work); |
| 231 | struct cpu_common_dbs_info *shared = cdbs->shared; | 224 | struct cpu_common_dbs_info *shared = cdbs->shared; |
| 232 | struct cpufreq_policy *policy = shared->policy; | 225 | struct cpufreq_policy *policy; |
| 233 | struct dbs_data *dbs_data = policy->governor_data; | 226 | struct dbs_data *dbs_data; |
| 234 | unsigned int sampling_rate, delay; | 227 | unsigned int sampling_rate, delay; |
| 235 | bool modify_all = true; | 228 | bool modify_all = true; |
| 236 | 229 | ||
| 237 | mutex_lock(&shared->timer_mutex); | 230 | mutex_lock(&shared->timer_mutex); |
| 238 | 231 | ||
| 232 | policy = shared->policy; | ||
| 233 | |||
| 234 | /* | ||
| 235 | * Governor might already be disabled and there is no point continuing | ||
| 236 | * with the work-handler. | ||
| 237 | */ | ||
| 238 | if (!policy) | ||
| 239 | goto unlock; | ||
| 240 | |||
| 241 | dbs_data = policy->governor_data; | ||
| 242 | |||
| 239 | if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { | 243 | if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { |
| 240 | struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; | 244 | struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; |
| 241 | 245 | ||
| @@ -252,6 +256,7 @@ static void dbs_timer(struct work_struct *work) | |||
| 252 | delay = dbs_data->cdata->gov_dbs_timer(cdbs, dbs_data, modify_all); | 256 | delay = dbs_data->cdata->gov_dbs_timer(cdbs, dbs_data, modify_all); |
| 253 | gov_queue_work(dbs_data, policy, delay, modify_all); | 257 | gov_queue_work(dbs_data, policy, delay, modify_all); |
| 254 | 258 | ||
| 259 | unlock: | ||
| 255 | mutex_unlock(&shared->timer_mutex); | 260 | mutex_unlock(&shared->timer_mutex); |
| 256 | } | 261 | } |
| 257 | 262 | ||
| @@ -478,9 +483,17 @@ static int cpufreq_governor_stop(struct cpufreq_policy *policy, | |||
| 478 | if (!shared || !shared->policy) | 483 | if (!shared || !shared->policy) |
| 479 | return -EBUSY; | 484 | return -EBUSY; |
| 480 | 485 | ||
| 486 | /* | ||
| 487 | * Work-handler must see this updated, as it should not proceed any | ||
| 488 | * further after governor is disabled. And so timer_mutex is taken while | ||
| 489 | * updating this value. | ||
| 490 | */ | ||
| 491 | mutex_lock(&shared->timer_mutex); | ||
| 492 | shared->policy = NULL; | ||
| 493 | mutex_unlock(&shared->timer_mutex); | ||
| 494 | |||
| 481 | gov_cancel_work(dbs_data, policy); | 495 | gov_cancel_work(dbs_data, policy); |
| 482 | 496 | ||
| 483 | shared->policy = NULL; | ||
| 484 | mutex_destroy(&shared->timer_mutex); | 497 | mutex_destroy(&shared->timer_mutex); |
| 485 | return 0; | 498 | return 0; |
| 486 | } | 499 | } |
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 93a3c635ea27..2e31d097def6 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
| @@ -684,8 +684,6 @@ static void __init intel_pstate_sysfs_expose_params(void) | |||
| 684 | 684 | ||
| 685 | static void intel_pstate_hwp_enable(struct cpudata *cpudata) | 685 | static void intel_pstate_hwp_enable(struct cpudata *cpudata) |
| 686 | { | 686 | { |
| 687 | pr_info("intel_pstate: HWP enabled\n"); | ||
| 688 | |||
| 689 | wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1); | 687 | wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1); |
| 690 | } | 688 | } |
| 691 | 689 | ||
| @@ -1557,8 +1555,10 @@ static int __init intel_pstate_init(void) | |||
| 1557 | if (!all_cpu_data) | 1555 | if (!all_cpu_data) |
| 1558 | return -ENOMEM; | 1556 | return -ENOMEM; |
| 1559 | 1557 | ||
| 1560 | if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp) | 1558 | if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp) { |
| 1559 | pr_info("intel_pstate: HWP enabled\n"); | ||
| 1561 | hwp_active++; | 1560 | hwp_active++; |
| 1561 | } | ||
| 1562 | 1562 | ||
| 1563 | if (!hwp_active && hwp_only) | 1563 | if (!hwp_active && hwp_only) |
| 1564 | goto out; | 1564 | goto out; |
| @@ -1593,8 +1593,10 @@ static int __init intel_pstate_setup(char *str) | |||
| 1593 | 1593 | ||
| 1594 | if (!strcmp(str, "disable")) | 1594 | if (!strcmp(str, "disable")) |
| 1595 | no_load = 1; | 1595 | no_load = 1; |
| 1596 | if (!strcmp(str, "no_hwp")) | 1596 | if (!strcmp(str, "no_hwp")) { |
| 1597 | pr_info("intel_pstate: HWP disabled\n"); | ||
| 1597 | no_hwp = 1; | 1598 | no_hwp = 1; |
| 1599 | } | ||
| 1598 | if (!strcmp(str, "force")) | 1600 | if (!strcmp(str, "force")) |
| 1599 | force_load = 1; | 1601 | force_load = 1; |
| 1600 | if (!strcmp(str, "hwp_only")) | 1602 | if (!strcmp(str, "hwp_only")) |
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c index 9e231f52150c..051a8a8224cd 100644 --- a/drivers/cpufreq/s5pv210-cpufreq.c +++ b/drivers/cpufreq/s5pv210-cpufreq.c | |||
| @@ -212,11 +212,11 @@ static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq) | |||
| 212 | /* Find current DRAM frequency */ | 212 | /* Find current DRAM frequency */ |
| 213 | tmp = s5pv210_dram_conf[ch].freq; | 213 | tmp = s5pv210_dram_conf[ch].freq; |
| 214 | 214 | ||
| 215 | do_div(tmp, freq); | 215 | tmp /= freq; |
| 216 | 216 | ||
| 217 | tmp1 = s5pv210_dram_conf[ch].refresh; | 217 | tmp1 = s5pv210_dram_conf[ch].refresh; |
| 218 | 218 | ||
| 219 | do_div(tmp1, tmp); | 219 | tmp1 /= tmp; |
| 220 | 220 | ||
| 221 | __raw_writel(tmp1, reg); | 221 | __raw_writel(tmp1, reg); |
| 222 | } | 222 | } |
