aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/base
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2012-10-02 21:32:35 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2012-10-02 21:32:35 -0400
commit16642a2e7be23bbda013fc32d8f6c68982eab603 (patch)
tree346ae485f485f6901e5d8150f0d34d178a7dd448 /drivers/base
parent51562cba98939da0a1d10fe7c25359b77a069033 (diff)
parentb9142167a2bb979b58b98ffcd928a311b55cbd9f (diff)
Merge tag 'pm-for-3.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull power management updates from Rafael J Wysocki: - Improved system suspend/resume and runtime PM handling for the SH TMU, CMT and MTU2 clock event devices (also used by ARM/shmobile). - Generic PM domains framework extensions related to cpuidle support and domain objects lookup using names. - ARM/shmobile power management updates including improved support for the SH7372's A4S power domain containing the CPU core. - cpufreq changes related to AMD CPUs support from Matthew Garrett, Andre Przywara and Borislav Petkov. - cpu0 cpufreq driver from Shawn Guo. - cpufreq governor fixes related to the relaxing of limit from Michal Pecio. - OMAP cpufreq updates from Axel Lin and Richard Zhao. - cpuidle ladder governor fixes related to the disabling of states from Carsten Emde and me. - Runtime PM core updates related to the interactions with the system suspend core from Alan Stern and Kevin Hilman. - Wakeup sources modification allowing more helper functions to be called from interrupt context from John Stultz and additional diagnostic code from Todd Poynor. - System suspend error code path fix from Feng Hong. Fixed up conflicts in cpufreq/powernow-k8 that stemmed from the workqueue fixes conflicting fairly badly with the removal of support for hardware P-state chips. The changes were independent but somewhat intertwined. 
* tag 'pm-for-3.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (76 commits) Revert "PM QoS: Use spinlock in the per-device PM QoS constraints code" PM / Runtime: let rpm_resume() succeed if RPM_ACTIVE, even when disabled, v2 cpuidle: rename function name "__cpuidle_register_driver", v2 cpufreq: OMAP: Check IS_ERR() instead of NULL for omap_device_get_by_hwmod_name cpuidle: remove some empty lines PM: Prevent runtime suspend during system resume PM QoS: Use spinlock in the per-device PM QoS constraints code PM / Sleep: use resume event when call dpm_resume_early cpuidle / ACPI : move cpuidle_device field out of the acpi_processor_power structure ACPI / processor: remove pointless variable initialization ACPI / processor: remove unused function parameter cpufreq: OMAP: remove loops_per_jiffy recalculate for smp sections: fix section conflicts in drivers/cpufreq cpufreq: conservative: update frequency when limits are relaxed cpufreq / ondemand: update frequency when limits are relaxed properly __init-annotate pm_sysrq_init() cpufreq: Add a generic cpufreq-cpu0 driver PM / OPP: Initialize OPP table from device tree ARM: add cpufreq transiton notifier to adjust loops_per_jiffy for smp cpufreq: Remove support for hardware P-state chips from powernow-k8 ...
Diffstat (limited to 'drivers/base')
-rw-r--r--drivers/base/platform.c2
-rw-r--r--drivers/base/power/domain.c244
-rw-r--r--drivers/base/power/main.c66
-rw-r--r--drivers/base/power/opp.c47
-rw-r--r--drivers/base/power/power.h36
-rw-r--r--drivers/base/power/runtime.c3
-rw-r--r--drivers/base/power/wakeup.c46
7 files changed, 372 insertions, 72 deletions
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index ddeca142293c..8727e9c5eea4 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -23,6 +23,7 @@
23#include <linux/idr.h> 23#include <linux/idr.h>
24 24
25#include "base.h" 25#include "base.h"
26#include "power/power.h"
26 27
27/* For automatically allocated device IDs */ 28/* For automatically allocated device IDs */
28static DEFINE_IDA(platform_devid_ida); 29static DEFINE_IDA(platform_devid_ida);
@@ -983,6 +984,7 @@ void __init early_platform_add_devices(struct platform_device **devs, int num)
983 dev = &devs[i]->dev; 984 dev = &devs[i]->dev;
984 985
985 if (!dev->devres_head.next) { 986 if (!dev->devres_head.next) {
987 pm_runtime_early_init(dev);
986 INIT_LIST_HEAD(&dev->devres_head); 988 INIT_LIST_HEAD(&dev->devres_head);
987 list_add_tail(&dev->devres_head, 989 list_add_tail(&dev->devres_head,
988 &early_platform_device_list); 990 &early_platform_device_list);
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index ba3487c9835b..c22b869245d9 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -53,6 +53,24 @@
53static LIST_HEAD(gpd_list); 53static LIST_HEAD(gpd_list);
54static DEFINE_MUTEX(gpd_list_lock); 54static DEFINE_MUTEX(gpd_list_lock);
55 55
56static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name)
57{
58 struct generic_pm_domain *genpd = NULL, *gpd;
59
60 if (IS_ERR_OR_NULL(domain_name))
61 return NULL;
62
63 mutex_lock(&gpd_list_lock);
64 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
65 if (!strcmp(gpd->name, domain_name)) {
66 genpd = gpd;
67 break;
68 }
69 }
70 mutex_unlock(&gpd_list_lock);
71 return genpd;
72}
73
56#ifdef CONFIG_PM 74#ifdef CONFIG_PM
57 75
58struct generic_pm_domain *dev_to_genpd(struct device *dev) 76struct generic_pm_domain *dev_to_genpd(struct device *dev)
@@ -256,10 +274,28 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
256 return ret; 274 return ret;
257} 275}
258 276
277/**
278 * pm_genpd_name_poweron - Restore power to a given PM domain and its masters.
279 * @domain_name: Name of the PM domain to power up.
280 */
281int pm_genpd_name_poweron(const char *domain_name)
282{
283 struct generic_pm_domain *genpd;
284
285 genpd = pm_genpd_lookup_name(domain_name);
286 return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
287}
288
259#endif /* CONFIG_PM */ 289#endif /* CONFIG_PM */
260 290
261#ifdef CONFIG_PM_RUNTIME 291#ifdef CONFIG_PM_RUNTIME
262 292
293static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
294 struct device *dev)
295{
296 return GENPD_DEV_CALLBACK(genpd, int, start, dev);
297}
298
263static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev) 299static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
264{ 300{
265 return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev, 301 return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
@@ -436,7 +472,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
436 not_suspended = 0; 472 not_suspended = 0;
437 list_for_each_entry(pdd, &genpd->dev_list, list_node) 473 list_for_each_entry(pdd, &genpd->dev_list, list_node)
438 if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev) 474 if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
439 || pdd->dev->power.irq_safe || to_gpd_data(pdd)->always_on)) 475 || pdd->dev->power.irq_safe))
440 not_suspended++; 476 not_suspended++;
441 477
442 if (not_suspended > genpd->in_progress) 478 if (not_suspended > genpd->in_progress)
@@ -578,9 +614,6 @@ static int pm_genpd_runtime_suspend(struct device *dev)
578 614
579 might_sleep_if(!genpd->dev_irq_safe); 615 might_sleep_if(!genpd->dev_irq_safe);
580 616
581 if (dev_gpd_data(dev)->always_on)
582 return -EBUSY;
583
584 stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL; 617 stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
585 if (stop_ok && !stop_ok(dev)) 618 if (stop_ok && !stop_ok(dev))
586 return -EBUSY; 619 return -EBUSY;
@@ -629,7 +662,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
629 662
630 /* If power.irq_safe, the PM domain is never powered off. */ 663 /* If power.irq_safe, the PM domain is never powered off. */
631 if (dev->power.irq_safe) 664 if (dev->power.irq_safe)
632 return genpd_start_dev(genpd, dev); 665 return genpd_start_dev_no_timing(genpd, dev);
633 666
634 mutex_lock(&genpd->lock); 667 mutex_lock(&genpd->lock);
635 ret = __pm_genpd_poweron(genpd); 668 ret = __pm_genpd_poweron(genpd);
@@ -697,6 +730,24 @@ static inline void genpd_power_off_work_fn(struct work_struct *work) {}
697 730
698#ifdef CONFIG_PM_SLEEP 731#ifdef CONFIG_PM_SLEEP
699 732
733/**
734 * pm_genpd_present - Check if the given PM domain has been initialized.
735 * @genpd: PM domain to check.
736 */
737static bool pm_genpd_present(struct generic_pm_domain *genpd)
738{
739 struct generic_pm_domain *gpd;
740
741 if (IS_ERR_OR_NULL(genpd))
742 return false;
743
744 list_for_each_entry(gpd, &gpd_list, gpd_list_node)
745 if (gpd == genpd)
746 return true;
747
748 return false;
749}
750
700static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd, 751static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
701 struct device *dev) 752 struct device *dev)
702{ 753{
@@ -750,9 +801,10 @@ static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
750 * Check if the given PM domain can be powered off (during system suspend or 801 * Check if the given PM domain can be powered off (during system suspend or
751 * hibernation) and do that if so. Also, in that case propagate to its masters. 802 * hibernation) and do that if so. Also, in that case propagate to its masters.
752 * 803 *
753 * This function is only called in "noirq" stages of system power transitions, 804 * This function is only called in "noirq" and "syscore" stages of system power
754 * so it need not acquire locks (all of the "noirq" callbacks are executed 805 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
755 * sequentially, so it is guaranteed that it will never run twice in parallel). 806 * executed sequentially, so it is guaranteed that it will never run twice in
807 * parallel).
756 */ 808 */
757static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd) 809static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
758{ 810{
@@ -777,6 +829,33 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
777} 829}
778 830
779/** 831/**
832 * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
833 * @genpd: PM domain to power on.
834 *
835 * This function is only called in "noirq" and "syscore" stages of system power
836 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
837 * executed sequentially, so it is guaranteed that it will never run twice in
838 * parallel).
839 */
840static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd)
841{
842 struct gpd_link *link;
843
844 if (genpd->status != GPD_STATE_POWER_OFF)
845 return;
846
847 list_for_each_entry(link, &genpd->slave_links, slave_node) {
848 pm_genpd_sync_poweron(link->master);
849 genpd_sd_counter_inc(link->master);
850 }
851
852 if (genpd->power_on)
853 genpd->power_on(genpd);
854
855 genpd->status = GPD_STATE_ACTIVE;
856}
857
858/**
780 * resume_needed - Check whether to resume a device before system suspend. 859 * resume_needed - Check whether to resume a device before system suspend.
781 * @dev: Device to check. 860 * @dev: Device to check.
782 * @genpd: PM domain the device belongs to. 861 * @genpd: PM domain the device belongs to.
@@ -937,7 +1016,7 @@ static int pm_genpd_suspend_noirq(struct device *dev)
937 if (IS_ERR(genpd)) 1016 if (IS_ERR(genpd))
938 return -EINVAL; 1017 return -EINVAL;
939 1018
940 if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on 1019 if (genpd->suspend_power_off
941 || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))) 1020 || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
942 return 0; 1021 return 0;
943 1022
@@ -970,7 +1049,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
970 if (IS_ERR(genpd)) 1049 if (IS_ERR(genpd))
971 return -EINVAL; 1050 return -EINVAL;
972 1051
973 if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on 1052 if (genpd->suspend_power_off
974 || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))) 1053 || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
975 return 0; 1054 return 0;
976 1055
@@ -979,7 +1058,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
979 * guaranteed that this function will never run twice in parallel for 1058 * guaranteed that this function will never run twice in parallel for
980 * the same PM domain, so it is not necessary to use locking here. 1059 * the same PM domain, so it is not necessary to use locking here.
981 */ 1060 */
982 pm_genpd_poweron(genpd); 1061 pm_genpd_sync_poweron(genpd);
983 genpd->suspended_count--; 1062 genpd->suspended_count--;
984 1063
985 return genpd_start_dev(genpd, dev); 1064 return genpd_start_dev(genpd, dev);
@@ -1090,8 +1169,7 @@ static int pm_genpd_freeze_noirq(struct device *dev)
1090 if (IS_ERR(genpd)) 1169 if (IS_ERR(genpd))
1091 return -EINVAL; 1170 return -EINVAL;
1092 1171
1093 return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ? 1172 return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
1094 0 : genpd_stop_dev(genpd, dev);
1095} 1173}
1096 1174
1097/** 1175/**
@@ -1111,8 +1189,7 @@ static int pm_genpd_thaw_noirq(struct device *dev)
1111 if (IS_ERR(genpd)) 1189 if (IS_ERR(genpd))
1112 return -EINVAL; 1190 return -EINVAL;
1113 1191
1114 return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ? 1192 return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
1115 0 : genpd_start_dev(genpd, dev);
1116} 1193}
1117 1194
1118/** 1195/**
@@ -1186,8 +1263,8 @@ static int pm_genpd_restore_noirq(struct device *dev)
1186 if (genpd->suspended_count++ == 0) { 1263 if (genpd->suspended_count++ == 0) {
1187 /* 1264 /*
1188 * The boot kernel might put the domain into arbitrary state, 1265 * The boot kernel might put the domain into arbitrary state,
1189 * so make it appear as powered off to pm_genpd_poweron(), so 1266 * so make it appear as powered off to pm_genpd_sync_poweron(),
1190 * that it tries to power it on in case it was really off. 1267 * so that it tries to power it on in case it was really off.
1191 */ 1268 */
1192 genpd->status = GPD_STATE_POWER_OFF; 1269 genpd->status = GPD_STATE_POWER_OFF;
1193 if (genpd->suspend_power_off) { 1270 if (genpd->suspend_power_off) {
@@ -1205,9 +1282,9 @@ static int pm_genpd_restore_noirq(struct device *dev)
1205 if (genpd->suspend_power_off) 1282 if (genpd->suspend_power_off)
1206 return 0; 1283 return 0;
1207 1284
1208 pm_genpd_poweron(genpd); 1285 pm_genpd_sync_poweron(genpd);
1209 1286
1210 return dev_gpd_data(dev)->always_on ? 0 : genpd_start_dev(genpd, dev); 1287 return genpd_start_dev(genpd, dev);
1211} 1288}
1212 1289
1213/** 1290/**
@@ -1246,6 +1323,31 @@ static void pm_genpd_complete(struct device *dev)
1246 } 1323 }
1247} 1324}
1248 1325
1326/**
1327 * pm_genpd_syscore_switch - Switch power during system core suspend or resume.
1328 * @dev: Device that normally is marked as "always on" to switch power for.
1329 *
1330 * This routine may only be called during the system core (syscore) suspend or
1331 * resume phase for devices whose "always on" flags are set.
1332 */
1333void pm_genpd_syscore_switch(struct device *dev, bool suspend)
1334{
1335 struct generic_pm_domain *genpd;
1336
1337 genpd = dev_to_genpd(dev);
1338 if (!pm_genpd_present(genpd))
1339 return;
1340
1341 if (suspend) {
1342 genpd->suspended_count++;
1343 pm_genpd_sync_poweroff(genpd);
1344 } else {
1345 pm_genpd_sync_poweron(genpd);
1346 genpd->suspended_count--;
1347 }
1348}
1349EXPORT_SYMBOL_GPL(pm_genpd_syscore_switch);
1350
1249#else 1351#else
1250 1352
1251#define pm_genpd_prepare NULL 1353#define pm_genpd_prepare NULL
@@ -1393,6 +1495,19 @@ int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
1393 return __pm_genpd_add_device(genpd, dev, td); 1495 return __pm_genpd_add_device(genpd, dev, td);
1394} 1496}
1395 1497
1498
1499/**
1500 * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it.
1501 * @domain_name: Name of the PM domain to add the device to.
1502 * @dev: Device to be added.
1503 * @td: Set of PM QoS timing parameters to attach to the device.
1504 */
1505int __pm_genpd_name_add_device(const char *domain_name, struct device *dev,
1506 struct gpd_timing_data *td)
1507{
1508 return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td);
1509}
1510
1396/** 1511/**
1397 * pm_genpd_remove_device - Remove a device from an I/O PM domain. 1512 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1398 * @genpd: PM domain to remove the device from. 1513 * @genpd: PM domain to remove the device from.
@@ -1455,26 +1570,6 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1455} 1570}
1456 1571
1457/** 1572/**
1458 * pm_genpd_dev_always_on - Set/unset the "always on" flag for a given device.
1459 * @dev: Device to set/unset the flag for.
1460 * @val: The new value of the device's "always on" flag.
1461 */
1462void pm_genpd_dev_always_on(struct device *dev, bool val)
1463{
1464 struct pm_subsys_data *psd;
1465 unsigned long flags;
1466
1467 spin_lock_irqsave(&dev->power.lock, flags);
1468
1469 psd = dev_to_psd(dev);
1470 if (psd && psd->domain_data)
1471 to_gpd_data(psd->domain_data)->always_on = val;
1472
1473 spin_unlock_irqrestore(&dev->power.lock, flags);
1474}
1475EXPORT_SYMBOL_GPL(pm_genpd_dev_always_on);
1476
1477/**
1478 * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag. 1573 * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag.
1479 * @dev: Device to set/unset the flag for. 1574 * @dev: Device to set/unset the flag for.
1480 * @val: The new value of the device's "need restore" flag. 1575 * @val: The new value of the device's "need restore" flag.
@@ -1505,7 +1600,8 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1505 struct gpd_link *link; 1600 struct gpd_link *link;
1506 int ret = 0; 1601 int ret = 0;
1507 1602
1508 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) 1603 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
1604 || genpd == subdomain)
1509 return -EINVAL; 1605 return -EINVAL;
1510 1606
1511 start: 1607 start:
@@ -1552,6 +1648,35 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1552} 1648}
1553 1649
1554/** 1650/**
1651 * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain.
1652 * @master_name: Name of the master PM domain to add the subdomain to.
1653 * @subdomain_name: Name of the subdomain to be added.
1654 */
1655int pm_genpd_add_subdomain_names(const char *master_name,
1656 const char *subdomain_name)
1657{
1658 struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd;
1659
1660 if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name))
1661 return -EINVAL;
1662
1663 mutex_lock(&gpd_list_lock);
1664 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
1665 if (!master && !strcmp(gpd->name, master_name))
1666 master = gpd;
1667
1668 if (!subdomain && !strcmp(gpd->name, subdomain_name))
1669 subdomain = gpd;
1670
1671 if (master && subdomain)
1672 break;
1673 }
1674 mutex_unlock(&gpd_list_lock);
1675
1676 return pm_genpd_add_subdomain(master, subdomain);
1677}
1678
1679/**
1555 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. 1680 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1556 * @genpd: Master PM domain to remove the subdomain from. 1681 * @genpd: Master PM domain to remove the subdomain from.
1557 * @subdomain: Subdomain to be removed. 1682 * @subdomain: Subdomain to be removed.
@@ -1704,7 +1829,16 @@ int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
1704} 1829}
1705EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks); 1830EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
1706 1831
1707int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state) 1832/**
1833 * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle.
1834 * @genpd: PM domain to be connected with cpuidle.
1835 * @state: cpuidle state this domain can disable/enable.
1836 *
1837 * Make a PM domain behave as though it contained a CPU core, that is, instead
1838 * of calling its power down routine it will enable the given cpuidle state so
1839 * that the cpuidle subsystem can power it down (if possible and desirable).
1840 */
1841int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
1708{ 1842{
1709 struct cpuidle_driver *cpuidle_drv; 1843 struct cpuidle_driver *cpuidle_drv;
1710 struct gpd_cpu_data *cpu_data; 1844 struct gpd_cpu_data *cpu_data;
@@ -1753,7 +1887,24 @@ int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
1753 goto out; 1887 goto out;
1754} 1888}
1755 1889
1756int genpd_detach_cpuidle(struct generic_pm_domain *genpd) 1890/**
1891 * pm_genpd_name_attach_cpuidle - Find PM domain and connect cpuidle to it.
1892 * @name: Name of the domain to connect to cpuidle.
1893 * @state: cpuidle state this domain can manipulate.
1894 */
1895int pm_genpd_name_attach_cpuidle(const char *name, int state)
1896{
1897 return pm_genpd_attach_cpuidle(pm_genpd_lookup_name(name), state);
1898}
1899
1900/**
1901 * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain.
1902 * @genpd: PM domain to remove the cpuidle connection from.
1903 *
1904 * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the
1905 * given PM domain.
1906 */
1907int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
1757{ 1908{
1758 struct gpd_cpu_data *cpu_data; 1909 struct gpd_cpu_data *cpu_data;
1759 struct cpuidle_state *idle_state; 1910 struct cpuidle_state *idle_state;
@@ -1784,6 +1935,15 @@ int genpd_detach_cpuidle(struct generic_pm_domain *genpd)
1784 return ret; 1935 return ret;
1785} 1936}
1786 1937
1938/**
1939 * pm_genpd_name_detach_cpuidle - Find PM domain and disconnect cpuidle from it.
1940 * @name: Name of the domain to disconnect cpuidle from.
1941 */
1942int pm_genpd_name_detach_cpuidle(const char *name)
1943{
1944 return pm_genpd_detach_cpuidle(pm_genpd_lookup_name(name));
1945}
1946
1787/* Default device callbacks for generic PM domains. */ 1947/* Default device callbacks for generic PM domains. */
1788 1948
1789/** 1949/**
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index b0b072a88f5f..a3c1404c7933 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -57,20 +57,17 @@ static pm_message_t pm_transition;
57static int async_error; 57static int async_error;
58 58
59/** 59/**
60 * device_pm_init - Initialize the PM-related part of a device object. 60 * device_pm_sleep_init - Initialize system suspend-related device fields.
61 * @dev: Device object being initialized. 61 * @dev: Device object being initialized.
62 */ 62 */
63void device_pm_init(struct device *dev) 63void device_pm_sleep_init(struct device *dev)
64{ 64{
65 dev->power.is_prepared = false; 65 dev->power.is_prepared = false;
66 dev->power.is_suspended = false; 66 dev->power.is_suspended = false;
67 init_completion(&dev->power.completion); 67 init_completion(&dev->power.completion);
68 complete_all(&dev->power.completion); 68 complete_all(&dev->power.completion);
69 dev->power.wakeup = NULL; 69 dev->power.wakeup = NULL;
70 spin_lock_init(&dev->power.lock);
71 pm_runtime_init(dev);
72 INIT_LIST_HEAD(&dev->power.entry); 70 INIT_LIST_HEAD(&dev->power.entry);
73 dev->power.power_state = PMSG_INVALID;
74} 71}
75 72
76/** 73/**
@@ -408,6 +405,9 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
408 TRACE_DEVICE(dev); 405 TRACE_DEVICE(dev);
409 TRACE_RESUME(0); 406 TRACE_RESUME(0);
410 407
408 if (dev->power.syscore)
409 goto Out;
410
411 if (dev->pm_domain) { 411 if (dev->pm_domain) {
412 info = "noirq power domain "; 412 info = "noirq power domain ";
413 callback = pm_noirq_op(&dev->pm_domain->ops, state); 413 callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@ -429,6 +429,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
429 429
430 error = dpm_run_callback(callback, dev, state, info); 430 error = dpm_run_callback(callback, dev, state, info);
431 431
432 Out:
432 TRACE_RESUME(error); 433 TRACE_RESUME(error);
433 return error; 434 return error;
434} 435}
@@ -486,6 +487,9 @@ static int device_resume_early(struct device *dev, pm_message_t state)
486 TRACE_DEVICE(dev); 487 TRACE_DEVICE(dev);
487 TRACE_RESUME(0); 488 TRACE_RESUME(0);
488 489
490 if (dev->power.syscore)
491 goto Out;
492
489 if (dev->pm_domain) { 493 if (dev->pm_domain) {
490 info = "early power domain "; 494 info = "early power domain ";
491 callback = pm_late_early_op(&dev->pm_domain->ops, state); 495 callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -507,6 +511,7 @@ static int device_resume_early(struct device *dev, pm_message_t state)
507 511
508 error = dpm_run_callback(callback, dev, state, info); 512 error = dpm_run_callback(callback, dev, state, info);
509 513
514 Out:
510 TRACE_RESUME(error); 515 TRACE_RESUME(error);
511 return error; 516 return error;
512} 517}
@@ -565,11 +570,13 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
565 pm_callback_t callback = NULL; 570 pm_callback_t callback = NULL;
566 char *info = NULL; 571 char *info = NULL;
567 int error = 0; 572 int error = 0;
568 bool put = false;
569 573
570 TRACE_DEVICE(dev); 574 TRACE_DEVICE(dev);
571 TRACE_RESUME(0); 575 TRACE_RESUME(0);
572 576
577 if (dev->power.syscore)
578 goto Complete;
579
573 dpm_wait(dev->parent, async); 580 dpm_wait(dev->parent, async);
574 device_lock(dev); 581 device_lock(dev);
575 582
@@ -583,7 +590,6 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
583 goto Unlock; 590 goto Unlock;
584 591
585 pm_runtime_enable(dev); 592 pm_runtime_enable(dev);
586 put = true;
587 593
588 if (dev->pm_domain) { 594 if (dev->pm_domain) {
589 info = "power domain "; 595 info = "power domain ";
@@ -632,13 +638,12 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
632 638
633 Unlock: 639 Unlock:
634 device_unlock(dev); 640 device_unlock(dev);
641
642 Complete:
635 complete_all(&dev->power.completion); 643 complete_all(&dev->power.completion);
636 644
637 TRACE_RESUME(error); 645 TRACE_RESUME(error);
638 646
639 if (put)
640 pm_runtime_put_sync(dev);
641
642 return error; 647 return error;
643} 648}
644 649
@@ -722,6 +727,9 @@ static void device_complete(struct device *dev, pm_message_t state)
722 void (*callback)(struct device *) = NULL; 727 void (*callback)(struct device *) = NULL;
723 char *info = NULL; 728 char *info = NULL;
724 729
730 if (dev->power.syscore)
731 return;
732
725 device_lock(dev); 733 device_lock(dev);
726 734
727 if (dev->pm_domain) { 735 if (dev->pm_domain) {
@@ -749,6 +757,8 @@ static void device_complete(struct device *dev, pm_message_t state)
749 } 757 }
750 758
751 device_unlock(dev); 759 device_unlock(dev);
760
761 pm_runtime_put_sync(dev);
752} 762}
753 763
754/** 764/**
@@ -834,6 +844,9 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
834 pm_callback_t callback = NULL; 844 pm_callback_t callback = NULL;
835 char *info = NULL; 845 char *info = NULL;
836 846
847 if (dev->power.syscore)
848 return 0;
849
837 if (dev->pm_domain) { 850 if (dev->pm_domain) {
838 info = "noirq power domain "; 851 info = "noirq power domain ";
839 callback = pm_noirq_op(&dev->pm_domain->ops, state); 852 callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@ -917,6 +930,9 @@ static int device_suspend_late(struct device *dev, pm_message_t state)
917 pm_callback_t callback = NULL; 930 pm_callback_t callback = NULL;
918 char *info = NULL; 931 char *info = NULL;
919 932
933 if (dev->power.syscore)
934 return 0;
935
920 if (dev->pm_domain) { 936 if (dev->pm_domain) {
921 info = "late power domain "; 937 info = "late power domain ";
922 callback = pm_late_early_op(&dev->pm_domain->ops, state); 938 callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -996,7 +1012,7 @@ int dpm_suspend_end(pm_message_t state)
996 1012
997 error = dpm_suspend_noirq(state); 1013 error = dpm_suspend_noirq(state);
998 if (error) { 1014 if (error) {
999 dpm_resume_early(state); 1015 dpm_resume_early(resume_event(state));
1000 return error; 1016 return error;
1001 } 1017 }
1002 1018
@@ -1043,16 +1059,23 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1043 if (async_error) 1059 if (async_error)
1044 goto Complete; 1060 goto Complete;
1045 1061
1046 pm_runtime_get_noresume(dev); 1062 /*
1063 * If a device configured to wake up the system from sleep states
1064 * has been suspended at run time and there's a resume request pending
1065 * for it, this is equivalent to the device signaling wakeup, so the
1066 * system suspend operation should be aborted.
1067 */
1047 if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) 1068 if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
1048 pm_wakeup_event(dev, 0); 1069 pm_wakeup_event(dev, 0);
1049 1070
1050 if (pm_wakeup_pending()) { 1071 if (pm_wakeup_pending()) {
1051 pm_runtime_put_sync(dev);
1052 async_error = -EBUSY; 1072 async_error = -EBUSY;
1053 goto Complete; 1073 goto Complete;
1054 } 1074 }
1055 1075
1076 if (dev->power.syscore)
1077 goto Complete;
1078
1056 device_lock(dev); 1079 device_lock(dev);
1057 1080
1058 if (dev->pm_domain) { 1081 if (dev->pm_domain) {
@@ -1111,12 +1134,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1111 Complete: 1134 Complete:
1112 complete_all(&dev->power.completion); 1135 complete_all(&dev->power.completion);
1113 1136
1114 if (error) { 1137 if (error)
1115 pm_runtime_put_sync(dev);
1116 async_error = error; 1138 async_error = error;
1117 } else if (dev->power.is_suspended) { 1139 else if (dev->power.is_suspended)
1118 __pm_runtime_disable(dev, false); 1140 __pm_runtime_disable(dev, false);
1119 }
1120 1141
1121 return error; 1142 return error;
1122} 1143}
@@ -1209,6 +1230,17 @@ static int device_prepare(struct device *dev, pm_message_t state)
1209 char *info = NULL; 1230 char *info = NULL;
1210 int error = 0; 1231 int error = 0;
1211 1232
1233 if (dev->power.syscore)
1234 return 0;
1235
1236 /*
1237 * If a device's parent goes into runtime suspend at the wrong time,
1238 * it won't be possible to resume the device. To prevent this we
1239 * block runtime suspend here, during the prepare phase, and allow
1240 * it again during the complete phase.
1241 */
1242 pm_runtime_get_noresume(dev);
1243
1212 device_lock(dev); 1244 device_lock(dev);
1213 1245
1214 dev->power.wakeup_path = device_may_wakeup(dev); 1246 dev->power.wakeup_path = device_may_wakeup(dev);
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index ac993eafec82..d9468642fc41 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -22,6 +22,7 @@
22#include <linux/rculist.h> 22#include <linux/rculist.h>
23#include <linux/rcupdate.h> 23#include <linux/rcupdate.h>
24#include <linux/opp.h> 24#include <linux/opp.h>
25#include <linux/of.h>
25 26
26/* 27/*
27 * Internal data structure organization with the OPP layer library is as 28 * Internal data structure organization with the OPP layer library is as
@@ -674,3 +675,49 @@ struct srcu_notifier_head *opp_get_notifier(struct device *dev)
674 675
675 return &dev_opp->head; 676 return &dev_opp->head;
676} 677}
678
679#ifdef CONFIG_OF
680/**
681 * of_init_opp_table() - Initialize opp table from device tree
682 * @dev: device pointer used to lookup device OPPs.
683 *
684 * Register the initial OPP table with the OPP library for given device.
685 */
686int of_init_opp_table(struct device *dev)
687{
688 const struct property *prop;
689 const __be32 *val;
690 int nr;
691
692 prop = of_find_property(dev->of_node, "operating-points", NULL);
693 if (!prop)
694 return -ENODEV;
695 if (!prop->value)
696 return -ENODATA;
697
698 /*
699 * Each OPP is a set of tuples consisting of frequency and
700 * voltage like <freq-kHz vol-uV>.
701 */
702 nr = prop->length / sizeof(u32);
703 if (nr % 2) {
704 dev_err(dev, "%s: Invalid OPP list\n", __func__);
705 return -EINVAL;
706 }
707
708 val = prop->value;
709 while (nr) {
710 unsigned long freq = be32_to_cpup(val++) * 1000;
711 unsigned long volt = be32_to_cpup(val++);
712
713 if (opp_add(dev, freq, volt)) {
714 dev_warn(dev, "%s: Failed to add OPP %ld\n",
715 __func__, freq);
716 continue;
717 }
718 nr -= 2;
719 }
720
721 return 0;
722}
723#endif
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index eeb4bff9505c..0dbfdf4419af 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -1,12 +1,32 @@
1#include <linux/pm_qos.h> 1#include <linux/pm_qos.h>
2 2
3static inline void device_pm_init_common(struct device *dev)
4{
5 if (!dev->power.early_init) {
6 spin_lock_init(&dev->power.lock);
7 dev->power.power_state = PMSG_INVALID;
8 dev->power.early_init = true;
9 }
10}
11
3#ifdef CONFIG_PM_RUNTIME 12#ifdef CONFIG_PM_RUNTIME
4 13
14static inline void pm_runtime_early_init(struct device *dev)
15{
16 dev->power.disable_depth = 1;
17 device_pm_init_common(dev);
18}
19
5extern void pm_runtime_init(struct device *dev); 20extern void pm_runtime_init(struct device *dev);
6extern void pm_runtime_remove(struct device *dev); 21extern void pm_runtime_remove(struct device *dev);
7 22
8#else /* !CONFIG_PM_RUNTIME */ 23#else /* !CONFIG_PM_RUNTIME */
9 24
/*
 * !CONFIG_PM_RUNTIME stub: there is no runtime PM state to prepare, so
 * early init reduces to the common one-time device PM setup.
 */
static inline void pm_runtime_early_init(struct device *dev)
{
	device_pm_init_common(dev);
}
29
10static inline void pm_runtime_init(struct device *dev) {} 30static inline void pm_runtime_init(struct device *dev) {}
11static inline void pm_runtime_remove(struct device *dev) {} 31static inline void pm_runtime_remove(struct device *dev) {}
12 32
@@ -25,7 +45,7 @@ static inline struct device *to_device(struct list_head *entry)
25 return container_of(entry, struct device, power.entry); 45 return container_of(entry, struct device, power.entry);
26} 46}
27 47
28extern void device_pm_init(struct device *dev); 48extern void device_pm_sleep_init(struct device *dev);
29extern void device_pm_add(struct device *); 49extern void device_pm_add(struct device *);
30extern void device_pm_remove(struct device *); 50extern void device_pm_remove(struct device *);
31extern void device_pm_move_before(struct device *, struct device *); 51extern void device_pm_move_before(struct device *, struct device *);
@@ -34,12 +54,7 @@ extern void device_pm_move_last(struct device *);
34 54
35#else /* !CONFIG_PM_SLEEP */ 55#else /* !CONFIG_PM_SLEEP */
36 56
37static inline void device_pm_init(struct device *dev) 57static inline void device_pm_sleep_init(struct device *dev) {}
38{
39 spin_lock_init(&dev->power.lock);
40 dev->power.power_state = PMSG_INVALID;
41 pm_runtime_init(dev);
42}
43 58
44static inline void device_pm_add(struct device *dev) 59static inline void device_pm_add(struct device *dev)
45{ 60{
@@ -60,6 +75,13 @@ static inline void device_pm_move_last(struct device *dev) {}
60 75
61#endif /* !CONFIG_PM_SLEEP */ 76#endif /* !CONFIG_PM_SLEEP */
62 77
/*
 * Full device PM initialization, called at device registration time.
 * Runs the idempotent common setup (skipped if pm_runtime_early_init()
 * already did it), then the sleep-specific and runtime-PM parts, which
 * compile to no-ops when the corresponding config options are off.
 */
static inline void device_pm_init(struct device *dev)
{
	device_pm_init_common(dev);
	device_pm_sleep_init(dev);
	pm_runtime_init(dev);
}
84
63#ifdef CONFIG_PM 85#ifdef CONFIG_PM
64 86
65/* 87/*
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 7d9c1cb1c39a..3148b10dc2e5 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -509,6 +509,9 @@ static int rpm_resume(struct device *dev, int rpmflags)
509 repeat: 509 repeat:
510 if (dev->power.runtime_error) 510 if (dev->power.runtime_error)
511 retval = -EINVAL; 511 retval = -EINVAL;
512 else if (dev->power.disable_depth == 1 && dev->power.is_suspended
513 && dev->power.runtime_status == RPM_ACTIVE)
514 retval = 1;
512 else if (dev->power.disable_depth > 0) 515 else if (dev->power.disable_depth > 0)
513 retval = -EACCES; 516 retval = -EACCES;
514 if (retval) 517 if (retval)
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index cbb463b3a750..e6ee5e80e546 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -127,6 +127,8 @@ EXPORT_SYMBOL_GPL(wakeup_source_destroy);
127 */ 127 */
128void wakeup_source_add(struct wakeup_source *ws) 128void wakeup_source_add(struct wakeup_source *ws)
129{ 129{
130 unsigned long flags;
131
130 if (WARN_ON(!ws)) 132 if (WARN_ON(!ws))
131 return; 133 return;
132 134
@@ -135,9 +137,9 @@ void wakeup_source_add(struct wakeup_source *ws)
135 ws->active = false; 137 ws->active = false;
136 ws->last_time = ktime_get(); 138 ws->last_time = ktime_get();
137 139
138 spin_lock_irq(&events_lock); 140 spin_lock_irqsave(&events_lock, flags);
139 list_add_rcu(&ws->entry, &wakeup_sources); 141 list_add_rcu(&ws->entry, &wakeup_sources);
140 spin_unlock_irq(&events_lock); 142 spin_unlock_irqrestore(&events_lock, flags);
141} 143}
142EXPORT_SYMBOL_GPL(wakeup_source_add); 144EXPORT_SYMBOL_GPL(wakeup_source_add);
143 145
@@ -147,12 +149,14 @@ EXPORT_SYMBOL_GPL(wakeup_source_add);
147 */ 149 */
148void wakeup_source_remove(struct wakeup_source *ws) 150void wakeup_source_remove(struct wakeup_source *ws)
149{ 151{
152 unsigned long flags;
153
150 if (WARN_ON(!ws)) 154 if (WARN_ON(!ws))
151 return; 155 return;
152 156
153 spin_lock_irq(&events_lock); 157 spin_lock_irqsave(&events_lock, flags);
154 list_del_rcu(&ws->entry); 158 list_del_rcu(&ws->entry);
155 spin_unlock_irq(&events_lock); 159 spin_unlock_irqrestore(&events_lock, flags);
156 synchronize_rcu(); 160 synchronize_rcu();
157} 161}
158EXPORT_SYMBOL_GPL(wakeup_source_remove); 162EXPORT_SYMBOL_GPL(wakeup_source_remove);
@@ -649,6 +653,31 @@ void pm_wakeup_event(struct device *dev, unsigned int msec)
649} 653}
650EXPORT_SYMBOL_GPL(pm_wakeup_event); 654EXPORT_SYMBOL_GPL(pm_wakeup_event);
651 655
656static void print_active_wakeup_sources(void)
657{
658 struct wakeup_source *ws;
659 int active = 0;
660 struct wakeup_source *last_activity_ws = NULL;
661
662 rcu_read_lock();
663 list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
664 if (ws->active) {
665 pr_info("active wakeup source: %s\n", ws->name);
666 active = 1;
667 } else if (!active &&
668 (!last_activity_ws ||
669 ktime_to_ns(ws->last_time) >
670 ktime_to_ns(last_activity_ws->last_time))) {
671 last_activity_ws = ws;
672 }
673 }
674
675 if (!active && last_activity_ws)
676 pr_info("last active wakeup source: %s\n",
677 last_activity_ws->name);
678 rcu_read_unlock();
679}
680
652/** 681/**
653 * pm_wakeup_pending - Check if power transition in progress should be aborted. 682 * pm_wakeup_pending - Check if power transition in progress should be aborted.
654 * 683 *
@@ -671,6 +700,10 @@ bool pm_wakeup_pending(void)
671 events_check_enabled = !ret; 700 events_check_enabled = !ret;
672 } 701 }
673 spin_unlock_irqrestore(&events_lock, flags); 702 spin_unlock_irqrestore(&events_lock, flags);
703
704 if (ret)
705 print_active_wakeup_sources();
706
674 return ret; 707 return ret;
675} 708}
676 709
@@ -723,15 +756,16 @@ bool pm_get_wakeup_count(unsigned int *count, bool block)
723bool pm_save_wakeup_count(unsigned int count) 756bool pm_save_wakeup_count(unsigned int count)
724{ 757{
725 unsigned int cnt, inpr; 758 unsigned int cnt, inpr;
759 unsigned long flags;
726 760
727 events_check_enabled = false; 761 events_check_enabled = false;
728 spin_lock_irq(&events_lock); 762 spin_lock_irqsave(&events_lock, flags);
729 split_counters(&cnt, &inpr); 763 split_counters(&cnt, &inpr);
730 if (cnt == count && inpr == 0) { 764 if (cnt == count && inpr == 0) {
731 saved_count = count; 765 saved_count = count;
732 events_check_enabled = true; 766 events_check_enabled = true;
733 } 767 }
734 spin_unlock_irq(&events_lock); 768 spin_unlock_irqrestore(&events_lock, flags);
735 return events_check_enabled; 769 return events_check_enabled;
736} 770}
737 771