author		Linus Torvalds <torvalds@linux-foundation.org>	2015-09-11 22:11:06 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-09-11 22:11:06 -0400
commit		fa9a67ef9de48de5474ea1e5a358340369e78b74 (patch)
tree		29cd125c13977b5c84f3ff99cd2f51b581d3d907
parent		05c78081d2d8eaf04bf60946fcc53380febf3376 (diff)
parent		4614e0cc66a8ea1d163efc364ba743424dee5c0a (diff)
Merge tag 'pm+acpi-4.3-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull more power management and ACPI updates from Rafael Wysocki:
 "These are mostly fixes and cleanups on top of the previous PM+ACPI pull
  request (cpufreq core and drivers, cpuidle, generic power domains
  framework). Some of them didn't make it into that pull request and some
  fix issues introduced by it.

  The only really new thing is the support for suspend frequency in the
  cpufreq-dt driver, but it is needed to fix an issue with Exynos
  platforms.

  Specifics:

   - build fix for the new Mediatek MT8173 cpufreq driver (Guenter Roeck)

   - generic power domains framework fixes (power on error code path,
     subdomain removal) and cleanup of a deprecated API user (Geert
     Uytterhoeven, Jon Hunter, Ulf Hansson)

   - cpufreq-dt driver fixes including two fixes for bugs related to the
     new Operating Performance Points Device Tree bindings introduced
     recently (Viresh Kumar)

   - suspend frequency support for the cpufreq-dt driver (Bartlomiej
     Zolnierkiewicz, Viresh Kumar)

   - cpufreq core cleanups (Viresh Kumar)

   - intel_pstate driver fixes (Chen Yu, Kristen Carlson Accardi)

   - additional sanity check in the cpuidle core (Xunlei Pang)

   - fix for a comment related to CPU power management (Lina Iyer)"

* tag 'pm+acpi-4.3-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  intel_pstate: fix PCT_TO_HWP macro
  intel_pstate: Fix user input of min/max to legal policy region
  PM / OPP: Return suspend_opp only if it is enabled
  cpufreq-dt: add suspend frequency support
  cpufreq: allow cpufreq_generic_suspend() to work without suspend frequency
  PM / OPP: add dev_pm_opp_get_suspend_opp() helper
  staging: board: Migrate away from __pm_genpd_name_add_device()
  cpufreq: Use __func__ to print function's name
  cpufreq: staticize cpufreq_cpu_get_raw()
  PM / Domains: Ensure subdomain is not in use before removing
  cpufreq: Add ARM_MT8173_CPUFREQ dependency on THERMAL
  cpuidle/coupled: Add sanity check for safe_state_index
  PM / Domains: Try power off masters in error path of __pm_genpd_poweron()
  cpufreq: dt: Tolerance applies on both sides of target voltage
  cpufreq: dt: Print error on failing to mark OPPs as shared
  cpufreq: dt: Check OPP count before marking them shared
  kernel/cpu_pm: fix cpu_cluster_pm_exit comment
-rw-r--r--  drivers/base/power/domain.c              | 38
-rw-r--r--  drivers/base/power/opp.c                 | 28
-rw-r--r--  drivers/cpufreq/Kconfig.arm              |  1
-rw-r--r--  drivers/cpufreq/cpufreq-dt.c             | 39
-rw-r--r--  drivers/cpufreq/cpufreq.c                |  9
-rw-r--r--  drivers/cpufreq/intel_pstate.c           | 38
-rw-r--r--  drivers/cpuidle/coupled.c                | 22
-rw-r--r--  drivers/cpuidle/cpuidle.h                |  6
-rw-r--r--  drivers/cpuidle/driver.c                 |  4
-rw-r--r--  drivers/staging/board/armadillo800eva.c  |  2
-rw-r--r--  drivers/staging/board/board.c            | 36
-rw-r--r--  include/linux/pm_opp.h                   |  6
-rw-r--r--  kernel/cpu_pm.c                          |  2
13 files changed, 187 insertions(+), 44 deletions(-)
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 416720159e96..16550c63d611 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -213,6 +213,18 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
 }
 
 /**
+ * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
+ * @genpd: PM domait to power off.
+ *
+ * Queue up the execution of pm_genpd_poweroff() unless it's already been done
+ * before.
+ */
+static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
+{
+	queue_work(pm_wq, &genpd->power_off_work);
+}
+
+/**
  * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
  * @genpd: PM domain to power up.
  *
@@ -259,8 +271,12 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
 	return 0;
 
 err:
-	list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
+	list_for_each_entry_continue_reverse(link,
+					     &genpd->slave_links,
+					     slave_node) {
 		genpd_sd_counter_dec(link->master);
+		genpd_queue_power_off_work(link->master);
+	}
 
 	return ret;
 }
@@ -349,18 +365,6 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
 }
 
 /**
- * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
- * @genpd: PM domait to power off.
- *
- * Queue up the execution of pm_genpd_poweroff() unless it's already been done
- * before.
- */
-static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
-{
-	queue_work(pm_wq, &genpd->power_off_work);
-}
-
-/**
  * pm_genpd_poweroff - Remove power from a given PM domain.
  * @genpd: PM domain to power down.
  *
@@ -1469,6 +1473,13 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 
 	mutex_lock(&genpd->lock);
 
+	if (!list_empty(&subdomain->slave_links) || subdomain->device_count) {
+		pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
+			subdomain->name);
+		ret = -EBUSY;
+		goto out;
+	}
+
 	list_for_each_entry(link, &genpd->master_links, master_node) {
 		if (link->slave != subdomain)
 			continue;
@@ -1487,6 +1498,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 		break;
 	}
 
+out:
 	mutex_unlock(&genpd->lock);
 
 	return ret;
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index eb254497a494..28cd75c535b0 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -341,6 +341,34 @@ unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
 
 /**
+ * dev_pm_opp_get_suspend_opp() - Get suspend opp
+ * @dev: device for which we do this operation
+ *
+ * Return: This function returns pointer to the suspend opp if it is
+ * defined and available, otherwise it returns NULL.
+ *
+ * Locking: This function must be called under rcu_read_lock(). opp is a rcu
+ * protected pointer. The reason for the same is that the opp pointer which is
+ * returned will remain valid for use with opp_get_{voltage, freq} only while
+ * under the locked area. The pointer returned must be used prior to unlocking
+ * with rcu_read_unlock() to maintain the integrity of the pointer.
+ */
+struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
+{
+	struct device_opp *dev_opp;
+
+	opp_rcu_lockdep_assert();
+
+	dev_opp = _find_device_opp(dev);
+	if (IS_ERR(dev_opp) || !dev_opp->suspend_opp ||
+	    !dev_opp->suspend_opp->available)
+		return NULL;
+
+	return dev_opp->suspend_opp;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
+
+/**
  * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
  * @dev: device for which we do this operation
  *
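
For context: dev_pm_opp_get_suspend_opp() is only safe to use as sketched below, with the lookup and any dev_pm_opp_get_freq() call on the returned pointer both inside the same RCU read-side section (the cpufreq-dt hunk further down follows exactly this pattern):

	struct dev_pm_opp *opp;
	unsigned long freq = 0;

	rcu_read_lock();
	opp = dev_pm_opp_get_suspend_opp(dev);
	if (opp)
		freq = dev_pm_opp_get_freq(opp);	/* pointer valid only while locked */
	rcu_read_unlock();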
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 5f498d9f1825..cd0391e46c6d 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -84,6 +84,7 @@ config ARM_KIRKWOOD_CPUFREQ
 config ARM_MT8173_CPUFREQ
 	bool "Mediatek MT8173 CPUFreq support"
 	depends on ARCH_MEDIATEK && REGULATOR
+	depends on !CPU_THERMAL || THERMAL=y
 	select PM_OPP
 	help
 	  This adds the CPUFreq driver support for Mediatek MT8173 SoC.
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index c3583cdfadbd..7c0d70e2a861 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -196,6 +196,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 	struct device *cpu_dev;
 	struct regulator *cpu_reg;
 	struct clk *cpu_clk;
+	struct dev_pm_opp *suspend_opp;
 	unsigned long min_uV = ~0, max_uV = 0;
 	unsigned int transition_latency;
 	bool need_update = false;
@@ -239,6 +240,17 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 	 */
 	of_cpumask_init_opp_table(policy->cpus);
 
+	/*
+	 * But we need OPP table to function so if it is not there let's
+	 * give platform code chance to provide it for us.
+	 */
+	ret = dev_pm_opp_get_opp_count(cpu_dev);
+	if (ret <= 0) {
+		pr_debug("OPP table is not ready, deferring probe\n");
+		ret = -EPROBE_DEFER;
+		goto out_free_opp;
+	}
+
 	if (need_update) {
 		struct cpufreq_dt_platform_data *pd = cpufreq_get_driver_data();
 
@@ -249,24 +261,16 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 		 * OPP tables are initialized only for policy->cpu, do it for
 		 * others as well.
 		 */
-		set_cpus_sharing_opps(cpu_dev, policy->cpus);
+		ret = set_cpus_sharing_opps(cpu_dev, policy->cpus);
+		if (ret)
+			dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
+				__func__, ret);
 
 		of_property_read_u32(np, "clock-latency", &transition_latency);
 	} else {
 		transition_latency = dev_pm_opp_get_max_clock_latency(cpu_dev);
 	}
 
-	/*
-	 * But we need OPP table to function so if it is not there let's
-	 * give platform code chance to provide it for us.
-	 */
-	ret = dev_pm_opp_get_opp_count(cpu_dev);
-	if (ret <= 0) {
-		pr_debug("OPP table is not ready, deferring probe\n");
-		ret = -EPROBE_DEFER;
-		goto out_free_opp;
-	}
-
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv) {
 		ret = -ENOMEM;
@@ -300,7 +304,8 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 		rcu_read_unlock();
 
 		tol_uV = opp_uV * priv->voltage_tolerance / 100;
-		if (regulator_is_supported_voltage(cpu_reg, opp_uV,
+		if (regulator_is_supported_voltage(cpu_reg,
+						   opp_uV - tol_uV,
 						   opp_uV + tol_uV)) {
 			if (opp_uV < min_uV)
 				min_uV = opp_uV;
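
Illustrative numbers for the fix above (hypothetical, not from the commit): with a DT voltage-tolerance of 5% and an OPP at opp_uV = 1,000,000 uV, tol_uV = 50,000 uV, so the regulator is now asked whether it supports the range [950,000, 1,050,000] uV. The old call passed the one-sided range [1,000,000, 1,050,000] uV and could wrongly reject OPPs whose supply sits slightly below the nominal voltage.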
@@ -329,6 +334,13 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 	policy->driver_data = priv;
 
 	policy->clk = cpu_clk;
+
+	rcu_read_lock();
+	suspend_opp = dev_pm_opp_get_suspend_opp(cpu_dev);
+	if (suspend_opp)
+		policy->suspend_freq = dev_pm_opp_get_freq(suspend_opp) / 1000;
+	rcu_read_unlock();
+
 	ret = cpufreq_table_validate_and_show(policy, freq_table);
 	if (ret) {
 		dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__,
@@ -419,6 +431,7 @@ static struct cpufreq_driver dt_cpufreq_driver = {
 	.ready = cpufreq_ready,
 	.name = "cpufreq-dt",
 	.attr = cpufreq_dt_attr,
+	.suspend = cpufreq_generic_suspend,
 };
 
 static int dt_cpufreq_probe(struct platform_device *pdev)
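
How the pieces fit (a sketch under the assumption that cpufreq_generic_suspend() behaves as in the cpufreq.c hunk below): dev_pm_opp_get_freq() returns Hz while policy->suspend_freq is kept in kHz, hence the division by 1000 at probe time. At system suspend the core calls the driver's .suspend hook, which roughly does:

	/* cpufreq_generic_suspend(), sketched: */
	if (!policy->suspend_freq)
		return 0;	/* no suspend frequency set: nothing to do */
	return __cpufreq_driver_target(policy, policy->suspend_freq,
				       CPUFREQ_RELATION_H);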
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b3d9368339af..6633b3fa996e 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -239,7 +239,7 @@ int cpufreq_generic_init(struct cpufreq_policy *policy,
 EXPORT_SYMBOL_GPL(cpufreq_generic_init);
 
 /* Only for cpufreq core internal use */
-struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
+static struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
 {
 	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
 
@@ -1626,8 +1626,8 @@ int cpufreq_generic_suspend(struct cpufreq_policy *policy)
 	int ret;
 
 	if (!policy->suspend_freq) {
-		pr_err("%s: suspend_freq can't be zero\n", __func__);
-		return -EINVAL;
+		pr_debug("%s: suspend_freq not defined\n", __func__);
+		return 0;
 	}
 
 	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
@@ -2031,8 +2031,7 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
 	if (!try_module_get(policy->governor->owner))
 		return -EINVAL;
 
-	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
-		 policy->cpu, event);
+	pr_debug("%s: for CPU %u, event %u\n", __func__, policy->cpu, event);
 
 	mutex_lock(&cpufreq_governor_lock);
 	if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index cddc61939a86..3af9dd7332e6 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -260,24 +260,31 @@ static inline void update_turbo_state(void)
 		cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
 }
 
-#define PCT_TO_HWP(x) (x * 255 / 100)
 static void intel_pstate_hwp_set(void)
 {
-	int min, max, cpu;
-	u64 value, freq;
+	int min, hw_min, max, hw_max, cpu, range, adj_range;
+	u64 value, cap;
+
+	rdmsrl(MSR_HWP_CAPABILITIES, cap);
+	hw_min = HWP_LOWEST_PERF(cap);
+	hw_max = HWP_HIGHEST_PERF(cap);
+	range = hw_max - hw_min;
 
 	get_online_cpus();
 
 	for_each_online_cpu(cpu) {
 		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
-		min = PCT_TO_HWP(limits.min_perf_pct);
+		adj_range = limits.min_perf_pct * range / 100;
+		min = hw_min + adj_range;
 		value &= ~HWP_MIN_PERF(~0L);
 		value |= HWP_MIN_PERF(min);
 
-		max = PCT_TO_HWP(limits.max_perf_pct);
+		adj_range = limits.max_perf_pct * range / 100;
+		max = hw_min + adj_range;
 		if (limits.no_turbo) {
-			rdmsrl( MSR_HWP_CAPABILITIES, freq);
-			max = HWP_GUARANTEED_PERF(freq);
+			hw_max = HWP_GUARANTEED_PERF(cap);
+			if (hw_max < max)
+				max = hw_max;
 		}
 
 		value &= ~HWP_MAX_PERF(~0L);
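
A worked example with hypothetical capability values: if HWP_LOWEST_PERF(cap) = 1 and HWP_HIGHEST_PERF(cap) = 35, then range = 34, and min_perf_pct = 50 maps to min = 1 + 50 * 34 / 100 = 18, inside the hardware's valid [1, 35] window. The removed PCT_TO_HWP(50) evaluated to 50 * 255 / 100 = 127, a value unrelated to what this CPU actually supports.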
@@ -423,6 +430,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
 
 	limits.max_sysfs_pct = clamp_t(int, input, 0 , 100);
 	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
+	limits.max_perf_pct = max(limits.min_policy_pct, limits.max_perf_pct);
+	limits.max_perf_pct = max(limits.min_perf_pct, limits.max_perf_pct);
 	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
 
 	if (hwp_active)
@@ -442,6 +451,8 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
 
 	limits.min_sysfs_pct = clamp_t(int, input, 0 , 100);
 	limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
+	limits.min_perf_pct = min(limits.max_policy_pct, limits.min_perf_pct);
+	limits.min_perf_pct = min(limits.max_perf_pct, limits.min_perf_pct);
 	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
 
 	if (hwp_active)
@@ -989,12 +1000,19 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 
 	limits.min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
 	limits.min_policy_pct = clamp_t(int, limits.min_policy_pct, 0 , 100);
-	limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
-	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
-
 	limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
 	limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100);
+
+	/* Normalize user input to [min_policy_pct, max_policy_pct] */
+	limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
+	limits.min_perf_pct = min(limits.max_policy_pct, limits.min_perf_pct);
 	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
+	limits.max_perf_pct = max(limits.min_policy_pct, limits.max_perf_pct);
+
+	/* Make sure min_perf_pct <= max_perf_pct */
+	limits.min_perf_pct = min(limits.max_perf_pct, limits.min_perf_pct);
+
+	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
 	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
 
 	if (hwp_active)
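
A worked pass through the new normalization, with hypothetical inputs min_policy_pct = 20, max_policy_pct = 90, min_sysfs_pct = 95, max_sysfs_pct = 30:

	min_perf_pct = max(20, 95) = 95, then min(90, 95) = 90
	max_perf_pct = min(90, 30) = 30, then max(20, 30) = 30
	min_perf_pct = min(30, 90) = 30	/* enforce min <= max */

Both limits land on 30%, inside the policy bounds and correctly ordered; the old sequence could leave min_perf_pct (95) above max_perf_pct (30).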
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index 1523e2d745eb..344058f8501a 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -187,6 +187,28 @@ bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state)
 }
 
 /**
+ * cpuidle_coupled_state_verify - check if the coupled states are correctly set.
+ * @drv: struct cpuidle_driver for the platform
+ *
+ * Returns 0 for valid state values, a negative error code otherwise:
+ *  * -EINVAL if any coupled state(safe_state_index) is wrongly set.
+ */
+int cpuidle_coupled_state_verify(struct cpuidle_driver *drv)
+{
+	int i;
+
+	for (i = drv->state_count - 1; i >= 0; i--) {
+		if (cpuidle_state_is_coupled(drv, i) &&
+		    (drv->safe_state_index == i ||
+		     drv->safe_state_index < 0 ||
+		     drv->safe_state_index >= drv->state_count))
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
  * cpuidle_coupled_set_ready - mark a cpu as ready
  * @coupled: the struct coupled that contains the current cpu
  */
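
A hypothetical driver layout that the new check accepts (names and states below are illustrative, not from the tree): state 1 is coupled, so safe_state_index must name a different, in-range state.

	static struct cpuidle_driver example_idle_driver = {
		.name = "example_idle",
		.states = {
			[0] = { .name = "wfi" },	/* non-coupled: a valid safe state */
			[1] = { .name = "off", .flags = CPUIDLE_FLAG_COUPLED },
		},
		.state_count = 2,
		.safe_state_index = 0,	/* 1 (itself), -1, or >= state_count now fails with -EINVAL */
	};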
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h
index 178c5ad3d568..f87f399b0540 100644
--- a/drivers/cpuidle/cpuidle.h
+++ b/drivers/cpuidle/cpuidle.h
@@ -35,6 +35,7 @@ extern void cpuidle_remove_sysfs(struct cpuidle_device *dev);
 
 #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
 bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state);
+int cpuidle_coupled_state_verify(struct cpuidle_driver *drv);
 int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
 		struct cpuidle_driver *drv, int next_state);
 int cpuidle_coupled_register_device(struct cpuidle_device *dev);
@@ -46,6 +47,11 @@ bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state)
 	return false;
 }
 
+static inline int cpuidle_coupled_state_verify(struct cpuidle_driver *drv)
+{
+	return 0;
+}
+
 static inline int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
 		struct cpuidle_driver *drv, int next_state)
 {
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 5db147859b90..389ade4572be 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -227,6 +227,10 @@ static int __cpuidle_register_driver(struct cpuidle_driver *drv)
 	if (!drv || !drv->state_count)
 		return -EINVAL;
 
+	ret = cpuidle_coupled_state_verify(drv);
+	if (ret)
+		return ret;
+
 	if (cpuidle_disabled())
 		return -ENODEV;
 
diff --git a/drivers/staging/board/armadillo800eva.c b/drivers/staging/board/armadillo800eva.c
index 81df77bd55cc..9c41652ee908 100644
--- a/drivers/staging/board/armadillo800eva.c
+++ b/drivers/staging/board/armadillo800eva.c
@@ -91,7 +91,7 @@ static const struct board_staging_dev armadillo800eva_devices[] __initconst = {
 		.pdev = &lcdc0_device,
 		.clocks = lcdc0_clocks,
 		.nclocks = ARRAY_SIZE(lcdc0_clocks),
-		.domain = "a4lc",
+		.domain = "/system-controller@e6180000/pm-domains/c5/a4lc@1"
 	},
 };
 
diff --git a/drivers/staging/board/board.c b/drivers/staging/board/board.c
index 29d456e29f38..3eb5eb8f069c 100644
--- a/drivers/staging/board/board.c
+++ b/drivers/staging/board/board.c
@@ -135,6 +135,40 @@ int __init board_staging_register_clock(const struct board_staging_clk *bsc)
 	return error;
 }
 
+#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
+static int board_staging_add_dev_domain(struct platform_device *pdev,
+					const char *domain)
+{
+	struct of_phandle_args pd_args;
+	struct generic_pm_domain *pd;
+	struct device_node *np;
+
+	np = of_find_node_by_path(domain);
+	if (!np) {
+		pr_err("Cannot find domain node %s\n", domain);
+		return -ENOENT;
+	}
+
+	pd_args.np = np;
+	pd_args.args_count = 0;
+	pd = of_genpd_get_from_provider(&pd_args);
+	if (IS_ERR(pd)) {
+		pr_err("Cannot find genpd %s (%ld)\n", domain, PTR_ERR(pd));
+		return PTR_ERR(pd);
+
+	}
+	pr_debug("Found genpd %s for device %s\n", pd->name, pdev->name);
+
+	return pm_genpd_add_device(pd, &pdev->dev);
+}
+#else
+static inline int board_staging_add_dev_domain(struct platform_device *pdev,
+					       const char *domain)
+{
+	return 0;
+}
+#endif
+
 int __init board_staging_register_device(const struct board_staging_dev *dev)
 {
 	struct platform_device *pdev = dev->pdev;
@@ -161,7 +195,7 @@ int __init board_staging_register_device(const struct board_staging_dev *dev)
 	}
 
 	if (dev->domain)
-		__pm_genpd_name_add_device(dev->domain, &pdev->dev, NULL);
+		board_staging_add_dev_domain(pdev, dev->domain);
 
 	return error;
 }
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index cab7ba55bedb..e817722ee3f0 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -34,6 +34,7 @@ bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp);
 
 int dev_pm_opp_get_opp_count(struct device *dev);
 unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev);
+struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev);
 
 struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
 					      unsigned long freq,
@@ -80,6 +81,11 @@ static inline unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
 	return 0;
 }
 
+static inline struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
+{
+	return NULL;
+}
+
 static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
 					unsigned long freq, bool available)
 {
diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c
index 9656a3c36503..009cc9a17d95 100644
--- a/kernel/cpu_pm.c
+++ b/kernel/cpu_pm.c
@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);
  * low power state that may have caused some blocks in the same power domain
  * to reset.
  *
- * Must be called after cpu_pm_exit has been called on all cpus in the power
+ * Must be called after cpu_cluster_pm_enter has been called for the power
  * domain, and before cpu_pm_exit has been called on any cpu in the power
  * domain. Notified drivers can include VFP co-processor, interrupt controller
  * and its PM extensions, local CPU timers context save/restore which