-rw-r--r--  drivers/base/power/main.c    |  2
-rw-r--r--  drivers/cpufreq/cpufreq-dt.c | 68
-rw-r--r--  kernel/power/hibernate.c     |  8
3 files changed, 51 insertions, 27 deletions
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 44973196d3fd..9717d5f20139 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1266,6 +1266,8 @@ int dpm_suspend_late(pm_message_t state)
 	}
 	mutex_unlock(&dpm_list_mtx);
 	async_synchronize_full();
+	if (!error)
+		error = async_error;
 	if (error) {
 		suspend_stats.failed_suspend_late++;
 		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 92c162af5045..23aaf40cf37f 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -187,6 +187,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 	struct device *cpu_dev;
 	struct regulator *cpu_reg;
 	struct clk *cpu_clk;
+	unsigned long min_uV = ~0, max_uV = 0;
 	unsigned int transition_latency;
 	int ret;
 
@@ -206,16 +207,10 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 	/* OPPs might be populated at runtime, don't check for error here */
 	of_init_opp_table(cpu_dev);
 
-	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
-	if (ret) {
-		dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
-		goto out_put_node;
-	}
-
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv) {
 		ret = -ENOMEM;
-		goto out_free_table;
+		goto out_put_node;
 	}
 
 	of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);
@@ -224,30 +219,51 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 	transition_latency = CPUFREQ_ETERNAL;
 
 	if (!IS_ERR(cpu_reg)) {
-		struct dev_pm_opp *opp;
-		unsigned long min_uV, max_uV;
-		int i;
+		unsigned long opp_freq = 0;
 
 		/*
-		 * OPP is maintained in order of increasing frequency, and
-		 * freq_table initialised from OPP is therefore sorted in the
-		 * same order.
+		 * Disable any OPPs where the connected regulator isn't able to
+		 * provide the specified voltage and record minimum and maximum
+		 * voltage levels.
 		 */
-		for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
-			;
-		rcu_read_lock();
-		opp = dev_pm_opp_find_freq_exact(cpu_dev,
-				freq_table[0].frequency * 1000, true);
-		min_uV = dev_pm_opp_get_voltage(opp);
-		opp = dev_pm_opp_find_freq_exact(cpu_dev,
-				freq_table[i-1].frequency * 1000, true);
-		max_uV = dev_pm_opp_get_voltage(opp);
-		rcu_read_unlock();
+		while (1) {
+			struct dev_pm_opp *opp;
+			unsigned long opp_uV, tol_uV;
+
+			rcu_read_lock();
+			opp = dev_pm_opp_find_freq_ceil(cpu_dev, &opp_freq);
+			if (IS_ERR(opp)) {
+				rcu_read_unlock();
+				break;
+			}
+			opp_uV = dev_pm_opp_get_voltage(opp);
+			rcu_read_unlock();
+
+			tol_uV = opp_uV * priv->voltage_tolerance / 100;
+			if (regulator_is_supported_voltage(cpu_reg, opp_uV,
+							   opp_uV + tol_uV)) {
+				if (opp_uV < min_uV)
+					min_uV = opp_uV;
+				if (opp_uV > max_uV)
+					max_uV = opp_uV;
+			} else {
+				dev_pm_opp_disable(cpu_dev, opp_freq);
+			}
+
+			opp_freq++;
+		}
+
 		ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
 		if (ret > 0)
 			transition_latency += ret * 1000;
 	}
 
+	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
+	if (ret) {
+		pr_err("failed to init cpufreq table: %d\n", ret);
+		goto out_free_priv;
+	}
+
 	/*
 	 * For now, just loading the cooling device;
 	 * thermal DT code takes care of matching them.
@@ -277,7 +293,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 	policy->cpuinfo.transition_latency = transition_latency;
 
 	pd = cpufreq_get_driver_data();
-	if (pd && !pd->independent_clocks)
+	if (!pd || !pd->independent_clocks)
 		cpumask_setall(policy->cpus);
 
 	of_node_put(np);
@@ -286,9 +302,9 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 
 out_cooling_unregister:
 	cpufreq_cooling_unregister(priv->cdev);
-	kfree(priv);
-out_free_table:
 	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+out_free_priv:
+	kfree(priv);
 out_put_node:
 	of_node_put(np);
 out_put_reg_clk:
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index a9dfa79b6bab..1f35a3478f3c 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -502,8 +502,14 @@ int hibernation_restore(int platform_mode)
 	error = dpm_suspend_start(PMSG_QUIESCE);
 	if (!error) {
 		error = resume_target_kernel(platform_mode);
-		dpm_resume_end(PMSG_RECOVER);
+		/*
+		 * The above should either succeed and jump to the new kernel,
+		 * or return with an error. Otherwise things are just
+		 * undefined, so let's be paranoid.
+		 */
+		BUG_ON(!error);
 	}
+	dpm_resume_end(PMSG_RECOVER);
 	pm_restore_gfp_mask();
 	resume_console();
 	pm_restore_console();