Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/core.c            |   9
-rw-r--r--  drivers/base/dma-contiguous.c  |   2
-rw-r--r--  drivers/base/platform.c        |   2
-rw-r--r--  drivers/base/power/domain.c    | 244
-rw-r--r--  drivers/base/power/main.c      |  35
-rw-r--r--  drivers/base/power/power.h     |  36
-rw-r--r--  drivers/base/power/runtime.c   |  13
7 files changed, 276 insertions, 65 deletions
diff --git a/drivers/base/core.c b/drivers/base/core.c
index f338037a4f3d..5e6e00bc1652 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1865,6 +1865,7 @@ int __dev_printk(const char *level, const struct device *dev,
 		 struct va_format *vaf)
 {
 	char dict[128];
+	const char *level_extra = "";
 	size_t dictlen = 0;
 	const char *subsys;
 
@@ -1911,10 +1912,14 @@ int __dev_printk(const char *level, const struct device *dev,
 			"DEVICE=+%s:%s", subsys, dev_name(dev));
 	}
 skip:
+	if (level[2])
+		level_extra = &level[2]; /* skip past KERN_SOH "L" */
+
 	return printk_emit(0, level[1] - '0',
 			   dictlen ? dict : NULL, dictlen,
-			   "%s %s: %pV",
-			   dev_driver_string(dev), dev_name(dev), vaf);
+			   "%s %s: %s%pV",
+			   dev_driver_string(dev), dev_name(dev),
+			   level_extra, vaf);
 }
 EXPORT_SYMBOL(__dev_printk);
 
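The new level_extra handling only matters when the level string carries text after the usual KERN_SOH-plus-digit pair; a plain dev_info()-style call leaves it empty. A minimal illustration (the caller and the extra text are hypothetical, only the splitting logic comes from the hunk above):

	/* KERN_DEBUG is "\001" "7"; append extra text after the digit: */
	const char *level = KERN_DEBUG "raw: ";	/* level[1] - '0' == 7 */
	/*
	 * level[2] != '\0', so level_extra points at "raw: " and the line is
	 * emitted as "<driver> <device>: raw: <message>" via "%s %s: %s%pV".
	 * For a bare KERN_* level, level[2] == '\0' and level_extra stays "".
	 */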
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 78efb0306a44..34d94c762a1e 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -250,7 +250,7 @@ int __init dma_declare_contiguous(struct device *dev, unsigned long size,
 		return -EINVAL;
 
 	/* Sanitise input arguments */
-	alignment = PAGE_SIZE << max(MAX_ORDER, pageblock_order);
+	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
 	base = ALIGN(base, alignment);
 	size = ALIGN(size, alignment);
 	limit &= ~(alignment - 1);
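The switch to MAX_ORDER - 1 aligns the reserved area to the largest block the buddy allocator can actually hand out, rather than one order above it. A worked example with commonly used (configuration-dependent, so assumed) values:

	/*
	 * Assumed: PAGE_SIZE = 4 KiB, MAX_ORDER = 11, pageblock_order = 10.
	 * The largest buddy allocation is order MAX_ORDER - 1 = 10.
	 *
	 *   before: alignment = 4 KiB << max(11, 10) = 8 MiB  (over-aligned)
	 *   after:  alignment = 4 KiB << max(10, 10) = 4 MiB  (largest buddy block)
	 */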
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index a1a722502587..d51514b79efe 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -22,6 +22,7 @@
 #include <linux/pm_runtime.h>
 
 #include "base.h"
+#include "power/power.h"
 
 #define to_platform_driver(drv)	(container_of((drv), struct platform_driver, \
 				 driver))
@@ -948,6 +949,7 @@ void __init early_platform_add_devices(struct platform_device **devs, int num)
 		dev = &devs[i]->dev;
 
 		if (!dev->devres_head.next) {
+			pm_runtime_early_init(dev);
 			INIT_LIST_HEAD(&dev->devres_head);
 			list_add_tail(&dev->devres_head,
 				      &early_platform_device_list);
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index ba3487c9835b..c22b869245d9 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -53,6 +53,24 @@
 static LIST_HEAD(gpd_list);
 static DEFINE_MUTEX(gpd_list_lock);
 
+static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name)
+{
+	struct generic_pm_domain *genpd = NULL, *gpd;
+
+	if (IS_ERR_OR_NULL(domain_name))
+		return NULL;
+
+	mutex_lock(&gpd_list_lock);
+	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
+		if (!strcmp(gpd->name, domain_name)) {
+			genpd = gpd;
+			break;
+		}
+	}
+	mutex_unlock(&gpd_list_lock);
+	return genpd;
+}
+
 #ifdef CONFIG_PM
 
 struct generic_pm_domain *dev_to_genpd(struct device *dev)
@@ -256,10 +274,28 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
 	return ret;
 }
 
+/**
+ * pm_genpd_name_poweron - Restore power to a given PM domain and its masters.
+ * @domain_name: Name of the PM domain to power up.
+ */
+int pm_genpd_name_poweron(const char *domain_name)
+{
+	struct generic_pm_domain *genpd;
+
+	genpd = pm_genpd_lookup_name(domain_name);
+	return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
+}
+
 #endif /* CONFIG_PM */
 
 #ifdef CONFIG_PM_RUNTIME
 
+static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
+				     struct device *dev)
+{
+	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
+}
+
 static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
 {
 	return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
@@ -436,7 +472,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 	not_suspended = 0;
 	list_for_each_entry(pdd, &genpd->dev_list, list_node)
 		if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
-		    || pdd->dev->power.irq_safe || to_gpd_data(pdd)->always_on))
+		    || pdd->dev->power.irq_safe))
 			not_suspended++;
 
 	if (not_suspended > genpd->in_progress)
@@ -578,9 +614,6 @@ static int pm_genpd_runtime_suspend(struct device *dev)
 
 	might_sleep_if(!genpd->dev_irq_safe);
 
-	if (dev_gpd_data(dev)->always_on)
-		return -EBUSY;
-
 	stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
 	if (stop_ok && !stop_ok(dev))
 		return -EBUSY;
@@ -629,7 +662,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
 
 	/* If power.irq_safe, the PM domain is never powered off. */
 	if (dev->power.irq_safe)
-		return genpd_start_dev(genpd, dev);
+		return genpd_start_dev_no_timing(genpd, dev);
 
 	mutex_lock(&genpd->lock);
 	ret = __pm_genpd_poweron(genpd);
@@ -697,6 +730,24 @@ static inline void genpd_power_off_work_fn(struct work_struct *work) {}
 
 #ifdef CONFIG_PM_SLEEP
 
+/**
+ * pm_genpd_present - Check if the given PM domain has been initialized.
+ * @genpd: PM domain to check.
+ */
+static bool pm_genpd_present(struct generic_pm_domain *genpd)
+{
+	struct generic_pm_domain *gpd;
+
+	if (IS_ERR_OR_NULL(genpd))
+		return false;
+
+	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
+		if (gpd == genpd)
+			return true;
+
+	return false;
+}
+
 static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
 				    struct device *dev)
 {
@@ -750,9 +801,10 @@ static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
  * Check if the given PM domain can be powered off (during system suspend or
  * hibernation) and do that if so. Also, in that case propagate to its masters.
  *
- * This function is only called in "noirq" stages of system power transitions,
- * so it need not acquire locks (all of the "noirq" callbacks are executed
- * sequentially, so it is guaranteed that it will never run twice in parallel).
+ * This function is only called in "noirq" and "syscore" stages of system power
+ * transitions, so it need not acquire locks (all of the "noirq" callbacks are
+ * executed sequentially, so it is guaranteed that it will never run twice in
+ * parallel).
  */
 static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
 {
@@ -777,6 +829,33 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
 }
 
 /**
+ * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
+ * @genpd: PM domain to power on.
+ *
+ * This function is only called in "noirq" and "syscore" stages of system power
+ * transitions, so it need not acquire locks (all of the "noirq" callbacks are
+ * executed sequentially, so it is guaranteed that it will never run twice in
+ * parallel).
+ */
+static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd)
+{
+	struct gpd_link *link;
+
+	if (genpd->status != GPD_STATE_POWER_OFF)
+		return;
+
+	list_for_each_entry(link, &genpd->slave_links, slave_node) {
+		pm_genpd_sync_poweron(link->master);
+		genpd_sd_counter_inc(link->master);
+	}
+
+	if (genpd->power_on)
+		genpd->power_on(genpd);
+
+	genpd->status = GPD_STATE_ACTIVE;
+}
+
+/**
  * resume_needed - Check whether to resume a device before system suspend.
  * @dev: Device to check.
  * @genpd: PM domain the device belongs to.
@@ -937,7 +1016,7 @@ static int pm_genpd_suspend_noirq(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
+	if (genpd->suspend_power_off
 	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
 		return 0;
 
@@ -970,7 +1049,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
+	if (genpd->suspend_power_off
 	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
 		return 0;
 
@@ -979,7 +1058,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
 	 * guaranteed that this function will never run twice in parallel for
 	 * the same PM domain, so it is not necessary to use locking here.
 	 */
-	pm_genpd_poweron(genpd);
+	pm_genpd_sync_poweron(genpd);
 	genpd->suspended_count--;
 
 	return genpd_start_dev(genpd, dev);
@@ -1090,8 +1169,7 @@ static int pm_genpd_freeze_noirq(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
-		0 : genpd_stop_dev(genpd, dev);
+	return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
 }
 
 /**
@@ -1111,8 +1189,7 @@ static int pm_genpd_thaw_noirq(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
-		0 : genpd_start_dev(genpd, dev);
+	return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
 }
 
 /**
@@ -1186,8 +1263,8 @@ static int pm_genpd_restore_noirq(struct device *dev)
 	if (genpd->suspended_count++ == 0) {
 		/*
 		 * The boot kernel might put the domain into arbitrary state,
-		 * so make it appear as powered off to pm_genpd_poweron(), so
-		 * that it tries to power it on in case it was really off.
+		 * so make it appear as powered off to pm_genpd_sync_poweron(),
+		 * so that it tries to power it on in case it was really off.
 		 */
 		genpd->status = GPD_STATE_POWER_OFF;
 		if (genpd->suspend_power_off) {
@@ -1205,9 +1282,9 @@ static int pm_genpd_restore_noirq(struct device *dev)
 	if (genpd->suspend_power_off)
 		return 0;
 
-	pm_genpd_poweron(genpd);
+	pm_genpd_sync_poweron(genpd);
 
-	return dev_gpd_data(dev)->always_on ? 0 : genpd_start_dev(genpd, dev);
+	return genpd_start_dev(genpd, dev);
 }
 
 /**
@@ -1246,6 +1323,31 @@ static void pm_genpd_complete(struct device *dev)
 	}
 }
 
+/**
+ * pm_genpd_syscore_switch - Switch power during system core suspend or resume.
+ * @dev: Device that normally is marked as "always on" to switch power for.
+ *
+ * This routine may only be called during the system core (syscore) suspend or
+ * resume phase for devices whose "always on" flags are set.
+ */
+void pm_genpd_syscore_switch(struct device *dev, bool suspend)
+{
+	struct generic_pm_domain *genpd;
+
+	genpd = dev_to_genpd(dev);
+	if (!pm_genpd_present(genpd))
+		return;
+
+	if (suspend) {
+		genpd->suspended_count++;
+		pm_genpd_sync_poweroff(genpd);
+	} else {
+		pm_genpd_sync_poweron(genpd);
+		genpd->suspended_count--;
+	}
+}
+EXPORT_SYMBOL_GPL(pm_genpd_syscore_switch);
+
 #else
 
 #define pm_genpd_prepare		NULL
@@ -1393,6 +1495,19 @@ int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
 	return __pm_genpd_add_device(genpd, dev, td);
 }
 
+
+/**
+ * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it.
+ * @domain_name: Name of the PM domain to add the device to.
+ * @dev: Device to be added.
+ * @td: Set of PM QoS timing parameters to attach to the device.
+ */
+int __pm_genpd_name_add_device(const char *domain_name, struct device *dev,
+			       struct gpd_timing_data *td)
+{
+	return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td);
+}
+
 /**
  * pm_genpd_remove_device - Remove a device from an I/O PM domain.
  * @genpd: PM domain to remove the device from.
@@ -1455,26 +1570,6 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 }
 
 /**
- * pm_genpd_dev_always_on - Set/unset the "always on" flag for a given device.
- * @dev: Device to set/unset the flag for.
- * @val: The new value of the device's "always on" flag.
- */
-void pm_genpd_dev_always_on(struct device *dev, bool val)
-{
-	struct pm_subsys_data *psd;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dev->power.lock, flags);
-
-	psd = dev_to_psd(dev);
-	if (psd && psd->domain_data)
-		to_gpd_data(psd->domain_data)->always_on = val;
-
-	spin_unlock_irqrestore(&dev->power.lock, flags);
-}
-EXPORT_SYMBOL_GPL(pm_genpd_dev_always_on);
-
-/**
  * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag.
  * @dev: Device to set/unset the flag for.
  * @val: The new value of the device's "need restore" flag.
@@ -1505,7 +1600,8 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
 	struct gpd_link *link;
 	int ret = 0;
 
-	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
+	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
+	    || genpd == subdomain)
 		return -EINVAL;
 
  start:
@@ -1552,6 +1648,35 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
 }
 
 /**
+ * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain.
+ * @master_name: Name of the master PM domain to add the subdomain to.
+ * @subdomain_name: Name of the subdomain to be added.
+ */
+int pm_genpd_add_subdomain_names(const char *master_name,
+				 const char *subdomain_name)
+{
+	struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd;
+
+	if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name))
+		return -EINVAL;
+
+	mutex_lock(&gpd_list_lock);
+	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
+		if (!master && !strcmp(gpd->name, master_name))
+			master = gpd;
+
+		if (!subdomain && !strcmp(gpd->name, subdomain_name))
+			subdomain = gpd;
+
+		if (master && subdomain)
+			break;
+	}
+	mutex_unlock(&gpd_list_lock);
+
+	return pm_genpd_add_subdomain(master, subdomain);
+}
+
+/**
  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
  * @genpd: Master PM domain to remove the subdomain from.
  * @subdomain: Subdomain to be removed.
@@ -1704,7 +1829,16 @@ int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
 }
 EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
 
-int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
+/**
+ * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle.
+ * @genpd: PM domain to be connected with cpuidle.
+ * @state: cpuidle state this domain can disable/enable.
+ *
+ * Make a PM domain behave as though it contained a CPU core, that is, instead
+ * of calling its power down routine it will enable the given cpuidle state so
+ * that the cpuidle subsystem can power it down (if possible and desirable).
+ */
+int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
 {
 	struct cpuidle_driver *cpuidle_drv;
 	struct gpd_cpu_data *cpu_data;
@@ -1753,7 +1887,24 @@ int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
 		goto out;
 }
 
-int genpd_detach_cpuidle(struct generic_pm_domain *genpd)
+/**
+ * pm_genpd_name_attach_cpuidle - Find PM domain and connect cpuidle to it.
+ * @name: Name of the domain to connect to cpuidle.
+ * @state: cpuidle state this domain can manipulate.
+ */
+int pm_genpd_name_attach_cpuidle(const char *name, int state)
+{
+	return pm_genpd_attach_cpuidle(pm_genpd_lookup_name(name), state);
+}
+
+/**
+ * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain.
+ * @genpd: PM domain to remove the cpuidle connection from.
+ *
+ * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the
+ * given PM domain.
+ */
+int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
 {
 	struct gpd_cpu_data *cpu_data;
 	struct cpuidle_state *idle_state;
@@ -1784,6 +1935,15 @@ int genpd_detach_cpuidle(struct generic_pm_domain *genpd)
 	return ret;
 }
 
+/**
+ * pm_genpd_name_detach_cpuidle - Find PM domain and disconnect cpuidle from it.
+ * @name: Name of the domain to disconnect cpuidle from.
+ */
+int pm_genpd_name_detach_cpuidle(const char *name)
+{
+	return pm_genpd_detach_cpuidle(pm_genpd_lookup_name(name));
+}
+
 /* Default device callbacks for generic PM domains. */
 
 /**
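Taken together, the name-based helpers added above let SoC setup code refer to PM domains by the names they were registered under instead of passing struct pointers around. A minimal sketch; the domain names, platform device, and cpuidle state index are made up for illustration:

	static void __init example_pm_domain_setup(struct platform_device *pdev)
	{
		pm_genpd_name_poweron("a3rp");				/* -EINVAL if the name is unknown */
		__pm_genpd_name_add_device("a3rp", &pdev->dev, NULL);	/* NULL: no timing data attached */
		pm_genpd_add_subdomain_names("a4s", "a3rp");		/* "a3rp" becomes a subdomain of "a4s" */
		pm_genpd_name_attach_cpuidle("a4s", 1);			/* tie "a4s" to cpuidle state index 1 */
	}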
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 0113adc310dc..57f5814c2732 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -57,20 +57,17 @@ static pm_message_t pm_transition;
 static int async_error;
 
 /**
- * device_pm_init - Initialize the PM-related part of a device object.
+ * device_pm_sleep_init - Initialize system suspend-related device fields.
  * @dev: Device object being initialized.
  */
-void device_pm_init(struct device *dev)
+void device_pm_sleep_init(struct device *dev)
 {
 	dev->power.is_prepared = false;
 	dev->power.is_suspended = false;
 	init_completion(&dev->power.completion);
 	complete_all(&dev->power.completion);
 	dev->power.wakeup = NULL;
-	spin_lock_init(&dev->power.lock);
-	pm_runtime_init(dev);
 	INIT_LIST_HEAD(&dev->power.entry);
-	dev->power.power_state = PMSG_INVALID;
 }
 
 /**
@@ -408,6 +405,9 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
 	TRACE_DEVICE(dev);
 	TRACE_RESUME(0);
 
+	if (dev->power.syscore)
+		goto Out;
+
 	if (dev->pm_domain) {
 		info = "noirq power domain ";
 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@ -429,6 +429,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
 
 	error = dpm_run_callback(callback, dev, state, info);
 
+ Out:
 	TRACE_RESUME(error);
 	return error;
 }
@@ -486,6 +487,9 @@ static int device_resume_early(struct device *dev, pm_message_t state)
 	TRACE_DEVICE(dev);
 	TRACE_RESUME(0);
 
+	if (dev->power.syscore)
+		goto Out;
+
 	if (dev->pm_domain) {
 		info = "early power domain ";
 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -507,6 +511,7 @@ static int device_resume_early(struct device *dev, pm_message_t state)
 
 	error = dpm_run_callback(callback, dev, state, info);
 
+ Out:
 	TRACE_RESUME(error);
 	return error;
 }
@@ -570,6 +575,9 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
 	TRACE_DEVICE(dev);
 	TRACE_RESUME(0);
 
+	if (dev->power.syscore)
+		goto Complete;
+
 	dpm_wait(dev->parent, async);
 	device_lock(dev);
 
@@ -632,6 +640,8 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
 
  Unlock:
 	device_unlock(dev);
+
+ Complete:
 	complete_all(&dev->power.completion);
 
 	TRACE_RESUME(error);
@@ -722,6 +732,9 @@ static void device_complete(struct device *dev, pm_message_t state)
 	void (*callback)(struct device *) = NULL;
 	char *info = NULL;
 
+	if (dev->power.syscore)
+		return;
+
 	device_lock(dev);
 
 	if (dev->pm_domain) {
@@ -834,6 +847,9 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
 	pm_callback_t callback = NULL;
 	char *info = NULL;
 
+	if (dev->power.syscore)
+		return 0;
+
 	if (dev->pm_domain) {
 		info = "noirq power domain ";
 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@ -917,6 +933,9 @@ static int device_suspend_late(struct device *dev, pm_message_t state)
 	pm_callback_t callback = NULL;
 	char *info = NULL;
 
+	if (dev->power.syscore)
+		return 0;
+
 	if (dev->pm_domain) {
 		info = "late power domain ";
 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -1053,6 +1072,9 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 		goto Complete;
 	}
 
+	if (dev->power.syscore)
+		goto Complete;
+
 	device_lock(dev);
 
 	if (dev->pm_domain) {
@@ -1209,6 +1231,9 @@ static int device_prepare(struct device *dev, pm_message_t state)
 	char *info = NULL;
 	int error = 0;
 
+	if (dev->power.syscore)
+		return 0;
+
 	device_lock(dev);
 
 	dev->power.wakeup_path = device_may_wakeup(dev);
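The power.syscore checks above make the system suspend/resume core skip such devices entirely. A sketch of how a driver for such a device might opt in; the probe function is hypothetical and setting the flag directly is an assumption about intended usage, only the checks themselves come from this patch:

	static int example_timer_probe(struct platform_device *pdev)
	{
		/*
		 * Stay functional across suspend/resume: every dpm_*() phase
		 * above returns early for this device, and its PM domain is
		 * handled from syscore code (e.g. pm_genpd_syscore_switch())
		 * instead.
		 */
		pdev->dev.power.syscore = true;
		return 0;
	}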
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index eeb4bff9505c..0dbfdf4419af 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -1,12 +1,32 @@
 #include <linux/pm_qos.h>
 
+static inline void device_pm_init_common(struct device *dev)
+{
+	if (!dev->power.early_init) {
+		spin_lock_init(&dev->power.lock);
+		dev->power.power_state = PMSG_INVALID;
+		dev->power.early_init = true;
+	}
+}
+
 #ifdef CONFIG_PM_RUNTIME
 
+static inline void pm_runtime_early_init(struct device *dev)
+{
+	dev->power.disable_depth = 1;
+	device_pm_init_common(dev);
+}
+
 extern void pm_runtime_init(struct device *dev);
 extern void pm_runtime_remove(struct device *dev);
 
 #else /* !CONFIG_PM_RUNTIME */
 
+static inline void pm_runtime_early_init(struct device *dev)
+{
+	device_pm_init_common(dev);
+}
+
 static inline void pm_runtime_init(struct device *dev) {}
 static inline void pm_runtime_remove(struct device *dev) {}
 
@@ -25,7 +45,7 @@ static inline struct device *to_device(struct list_head *entry)
 	return container_of(entry, struct device, power.entry);
 }
 
-extern void device_pm_init(struct device *dev);
+extern void device_pm_sleep_init(struct device *dev);
 extern void device_pm_add(struct device *);
 extern void device_pm_remove(struct device *);
 extern void device_pm_move_before(struct device *, struct device *);
@@ -34,12 +54,7 @@ extern void device_pm_move_last(struct device *);
 
 #else /* !CONFIG_PM_SLEEP */
 
-static inline void device_pm_init(struct device *dev)
-{
-	spin_lock_init(&dev->power.lock);
-	dev->power.power_state = PMSG_INVALID;
-	pm_runtime_init(dev);
-}
+static inline void device_pm_sleep_init(struct device *dev) {}
 
 static inline void device_pm_add(struct device *dev)
 {
@@ -60,6 +75,13 @@ static inline void device_pm_move_last(struct device *dev) {}
 
 #endif /* !CONFIG_PM_SLEEP */
 
+static inline void device_pm_init(struct device *dev)
+{
+	device_pm_init_common(dev);
+	device_pm_sleep_init(dev);
+	pm_runtime_init(dev);
+}
+
 #ifdef CONFIG_PM
 
 /*
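The split between device_pm_init_common(), pm_runtime_early_init() and device_pm_init() exists so that early platform devices, registered before the driver core is up, get only the PM state they need and the common pieces are not initialized twice. A rough sketch of the resulting call order (illustrative, based on the hunks above and the platform.c change):

	/*
	 * early_platform_add_devices(devs, n)
	 *   -> pm_runtime_early_init(dev)		// runtime PM off: disable_depth = 1
	 *        -> device_pm_init_common(dev)		// lock + power_state set up once, early_init = true
	 *
	 * ...later, when the device is formally registered...
	 *
	 * device_initialize(dev)
	 *   -> device_pm_init(dev)
	 *        -> device_pm_init_common(dev)		// no-op now: early_init already set
	 *        -> device_pm_sleep_init(dev)		// system sleep fields (CONFIG_PM_SLEEP only)
	 *        -> pm_runtime_init(dev)		// full runtime PM initialization
	 */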
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 59894873a3b3..7d9c1cb1c39a 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -147,6 +147,8 @@ static int rpm_check_suspend_allowed(struct device *dev)
 	    || (dev->power.request_pending
 	    && dev->power.request == RPM_REQ_RESUME))
 		retval = -EAGAIN;
+	else if (__dev_pm_qos_read_value(dev) < 0)
+		retval = -EPERM;
 	else if (dev->power.runtime_status == RPM_SUSPENDED)
 		retval = 1;
 
@@ -388,7 +390,6 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 		goto repeat;
 	}
 
-	dev->power.deferred_resume = false;
 	if (dev->power.no_callbacks)
 		goto no_callback;	/* Assume success. */
 
@@ -403,12 +404,6 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 		goto out;
 	}
 
-	if (__dev_pm_qos_read_value(dev) < 0) {
-		/* Negative PM QoS constraint means "never suspend". */
-		retval = -EPERM;
-		goto out;
-	}
-
 	__update_runtime_status(dev, RPM_SUSPENDING);
 
 	if (dev->pm_domain)
@@ -440,6 +435,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 	wake_up_all(&dev->power.wait_queue);
 
 	if (dev->power.deferred_resume) {
+		dev->power.deferred_resume = false;
 		rpm_resume(dev, 0);
 		retval = -EAGAIN;
 		goto out;
@@ -584,6 +580,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
 		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
 			atomic_inc(&dev->parent->power.child_count);
 			spin_unlock(&dev->parent->power.lock);
+			retval = 1;
 			goto no_callback;	/* Assume success. */
 		}
 		spin_unlock(&dev->parent->power.lock);
@@ -664,7 +661,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
 	}
 	wake_up_all(&dev->power.wait_queue);
 
-	if (!retval)
+	if (retval >= 0)
 		rpm_idle(dev, RPM_ASYNC);
 
  out: