 MAINTAINERS                        |  3
 drivers/cpufreq/arm_big_little.c   | 22
 drivers/cpufreq/cpufreq_governor.c | 33
 drivers/cpufreq/intel_pstate.c     | 10
 drivers/cpufreq/s5pv210-cpufreq.c  |  4
 5 files changed, 46 insertions, 26 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index caddb49b207d..08a1378c70ed 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5440,7 +5440,8 @@ S: Supported
 F:	drivers/idle/intel_idle.c
 
 INTEL PSTATE DRIVER
-M:	Kristen Carlson Accardi <kristen@linux.intel.com>
+M:	Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+M:	Len Brown <lenb@kernel.org>
 L:	linux-pm@vger.kernel.org
 S:	Supported
 F:	drivers/cpufreq/intel_pstate.c
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index f1e42f8ce0fc..c5d256caa664 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -149,6 +149,19 @@ bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
 		__func__, cpu, old_cluster, new_cluster, new_rate);
 
 	ret = clk_set_rate(clk[new_cluster], new_rate * 1000);
+	if (!ret) {
+		/*
+		 * FIXME: clk_set_rate hasn't returned an error here however it
+		 * may be that clk_change_rate failed due to hardware or
+		 * firmware issues and wasn't able to report that due to the
+		 * current design of the clk core layer. To work around this
+		 * problem we will read back the clock rate and check it is
+		 * correct. This needs to be removed once clk core is fixed.
+		 */
+		if (clk_get_rate(clk[new_cluster]) != new_rate * 1000)
+			ret = -EIO;
+	}
+
 	if (WARN_ON(ret)) {
 		pr_err("clk_set_rate failed: %d, new cluster: %d\n", ret,
 		       new_cluster);
@@ -189,15 +202,6 @@ bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
 		mutex_unlock(&cluster_lock[old_cluster]);
 	}
 
-	/*
-	 * FIXME: clk_set_rate has to handle the case where clk_change_rate
-	 * can fail due to hardware or firmware issues. Until the clk core
-	 * layer is fixed, we can check here. In most of the cases we will
-	 * be reading only the cached value anyway. This needs to be removed
-	 * once clk core is fixed.
-	 */
-	if (bL_cpufreq_get_rate(cpu) != new_rate)
-		return -EIO;
 	return 0;
 }
 
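The hunks above also tighten the workaround's scope: the rate is now read back
only when clk_set_rate() itself returned success, and only the new cluster's
clock is checked, instead of re-validating through bL_cpufreq_get_rate() at the
end of the function. A minimal sketch of this read-back-verify pattern, with a
hypothetical helper name that is not part of the patch:

	#include <linux/clk.h>

	/* Illustrative only: set a rate and verify it actually took effect. */
	static int clk_set_rate_checked(struct clk *clk, unsigned long rate_hz)
	{
		int ret = clk_set_rate(clk, rate_hz);

		if (ret)
			return ret;	/* the clk core reported the failure */

		/* Read back: some failures are silent in the current clk core. */
		if (clk_get_rate(clk) != rate_hz)
			return -EIO;

		return 0;
	}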
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 11258c4c1b17..b260576ddb12 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -171,10 +171,6 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
 {
 	int i;
 
-	mutex_lock(&cpufreq_governor_lock);
-	if (!policy->governor_enabled)
-		goto out_unlock;
-
 	if (!all_cpus) {
 		/*
 		 * Use raw_smp_processor_id() to avoid preemptible warnings.
@@ -188,9 +184,6 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
 		for_each_cpu(i, policy->cpus)
 			__gov_queue_work(i, dbs_data, delay);
 	}
-
-out_unlock:
-	mutex_unlock(&cpufreq_governor_lock);
 }
 EXPORT_SYMBOL_GPL(gov_queue_work);
 
@@ -229,13 +222,24 @@ static void dbs_timer(struct work_struct *work)
 	struct cpu_dbs_info *cdbs = container_of(work, struct cpu_dbs_info,
 						 dwork.work);
 	struct cpu_common_dbs_info *shared = cdbs->shared;
-	struct cpufreq_policy *policy = shared->policy;
-	struct dbs_data *dbs_data = policy->governor_data;
+	struct cpufreq_policy *policy;
+	struct dbs_data *dbs_data;
 	unsigned int sampling_rate, delay;
 	bool modify_all = true;
 
 	mutex_lock(&shared->timer_mutex);
 
+	policy = shared->policy;
+
+	/*
+	 * Governor might already be disabled and there is no point continuing
+	 * with the work-handler.
+	 */
+	if (!policy)
+		goto unlock;
+
+	dbs_data = policy->governor_data;
+
 	if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
 		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 
@@ -252,6 +256,7 @@ static void dbs_timer(struct work_struct *work)
 	delay = dbs_data->cdata->gov_dbs_timer(cdbs, dbs_data, modify_all);
 	gov_queue_work(dbs_data, policy, delay, modify_all);
 
+unlock:
 	mutex_unlock(&shared->timer_mutex);
 }
 
@@ -478,9 +483,17 @@ static int cpufreq_governor_stop(struct cpufreq_policy *policy,
 	if (!shared || !shared->policy)
 		return -EBUSY;
 
+	/*
+	 * Work-handler must see this updated, as it should not proceed any
+	 * further after governor is disabled. And so timer_mutex is taken while
+	 * updating this value.
+	 */
+	mutex_lock(&shared->timer_mutex);
+	shared->policy = NULL;
+	mutex_unlock(&shared->timer_mutex);
+
 	gov_cancel_work(dbs_data, policy);
 
-	shared->policy = NULL;
 	mutex_destroy(&shared->timer_mutex);
 	return 0;
 }
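Taken together, the cpufreq_governor.c hunks replace the global
cpufreq_governor_lock / governor_enabled check with a per-policy handshake:
cpufreq_governor_stop() clears shared->policy under timer_mutex before
cancelling the work, and dbs_timer() re-reads the pointer under the same mutex
and bails out once the governor is disabled. A hedged sketch of that shutdown
handshake, with illustrative names only:

	#include <linux/mutex.h>

	struct shared_state {
		struct mutex lock;
		void *resource;		/* plays the role of shared->policy */
	};

	static void work_handler(struct shared_state *s)
	{
		mutex_lock(&s->lock);
		if (!s->resource)	/* owner has already shut us down */
			goto unlock;
		/* ... safe to use s->resource here ... */
	unlock:
		mutex_unlock(&s->lock);
	}

	static void stop(struct shared_state *s)
	{
		mutex_lock(&s->lock);
		s->resource = NULL;	/* handlers must see this before using it */
		mutex_unlock(&s->lock);
		/* ... then cancel or flush any pending work ... */
	}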
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 93a3c635ea27..2e31d097def6 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -684,8 +684,6 @@ static void __init intel_pstate_sysfs_expose_params(void)
 
 static void intel_pstate_hwp_enable(struct cpudata *cpudata)
 {
-	pr_info("intel_pstate: HWP enabled\n");
-
 	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
 }
 
@@ -1557,8 +1555,10 @@ static int __init intel_pstate_init(void)
 	if (!all_cpu_data)
 		return -ENOMEM;
 
-	if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp)
+	if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp) {
+		pr_info("intel_pstate: HWP enabled\n");
 		hwp_active++;
+	}
 
 	if (!hwp_active && hwp_only)
 		goto out;
@@ -1593,8 +1593,10 @@ static int __init intel_pstate_setup(char *str)
 
 	if (!strcmp(str, "disable"))
 		no_load = 1;
-	if (!strcmp(str, "no_hwp"))
+	if (!strcmp(str, "no_hwp")) {
+		pr_info("intel_pstate: HWP disabled\n");
 		no_hwp = 1;
+	}
 	if (!strcmp(str, "force"))
 		force_load = 1;
 	if (!strcmp(str, "hwp_only"))
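The intel_pstate hunks only relocate diagnostics: "HWP enabled" is now printed
once from the init path rather than from intel_pstate_hwp_enable(), which may
be invoked once per CPU, and disabling HWP is logged where the option is
parsed. For reference, the option reaches intel_pstate_setup() via the kernel
command line, e.g.:

	intel_pstate=no_hwp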
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index 9e231f52150c..051a8a8224cd 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -212,11 +212,11 @@ static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq)
 	/* Find current DRAM frequency */
 	tmp = s5pv210_dram_conf[ch].freq;
 
-	do_div(tmp, freq);
+	tmp /= freq;
 
 	tmp1 = s5pv210_dram_conf[ch].refresh;
 
-	do_div(tmp1, tmp);
+	tmp1 /= tmp;
 
 	__raw_writel(tmp1, reg);
 }
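The s5pv210 change works because both dividends are native unsigned long
values: do_div() exists for dividing a 64-bit quantity on 32-bit kernels,
expects a u64 lvalue that it modifies in place, and returns the remainder, so
plain C division is the right tool here. A hedged sketch contrasting the two,
with illustrative names:

	#include <asm/div64.h>	/* do_div() */

	static unsigned long scale(u64 wide, u32 div, unsigned long narrow)
	{
		/*
		 * 64-bit dividend on a 32-bit kernel: do_div() is required.
		 * The quotient is left in 'wide'; the remainder is returned.
		 */
		do_div(wide, div);

		/* Native-width operands: the plain / operator is correct. */
		return narrow / (unsigned long)wide;
	}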