aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/base
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/base')
-rw-r--r--drivers/base/platform.c16
-rw-r--r--drivers/base/power/clock_ops.c19
-rw-r--r--drivers/base/power/common.c52
-rw-r--r--drivers/base/power/domain.c865
-rw-r--r--drivers/base/power/domain_governor.c7
-rw-r--r--drivers/base/power/main.c8
-rw-r--r--drivers/base/power/sysfs.c24
-rw-r--r--drivers/base/power/wakeup.c16
-rw-r--r--drivers/base/syscore.c7
9 files changed, 627 insertions, 387 deletions
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index ab4f4ce02722..b2afc29403f9 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -21,6 +21,7 @@
21#include <linux/err.h> 21#include <linux/err.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/pm_runtime.h> 23#include <linux/pm_runtime.h>
24#include <linux/pm_domain.h>
24#include <linux/idr.h> 25#include <linux/idr.h>
25#include <linux/acpi.h> 26#include <linux/acpi.h>
26#include <linux/clk/clk-conf.h> 27#include <linux/clk/clk-conf.h>
@@ -506,11 +507,12 @@ static int platform_drv_probe(struct device *_dev)
506 if (ret < 0) 507 if (ret < 0)
507 return ret; 508 return ret;
508 509
509 acpi_dev_pm_attach(_dev, true); 510 ret = dev_pm_domain_attach(_dev, true);
510 511 if (ret != -EPROBE_DEFER) {
511 ret = drv->probe(dev); 512 ret = drv->probe(dev);
512 if (ret) 513 if (ret)
513 acpi_dev_pm_detach(_dev, true); 514 dev_pm_domain_detach(_dev, true);
515 }
514 516
515 if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) { 517 if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
516 dev_warn(_dev, "probe deferral not supported\n"); 518 dev_warn(_dev, "probe deferral not supported\n");
@@ -532,7 +534,7 @@ static int platform_drv_remove(struct device *_dev)
532 int ret; 534 int ret;
533 535
534 ret = drv->remove(dev); 536 ret = drv->remove(dev);
535 acpi_dev_pm_detach(_dev, true); 537 dev_pm_domain_detach(_dev, true);
536 538
537 return ret; 539 return ret;
538} 540}
@@ -543,7 +545,7 @@ static void platform_drv_shutdown(struct device *_dev)
543 struct platform_device *dev = to_platform_device(_dev); 545 struct platform_device *dev = to_platform_device(_dev);
544 546
545 drv->shutdown(dev); 547 drv->shutdown(dev);
546 acpi_dev_pm_detach(_dev, true); 548 dev_pm_domain_detach(_dev, true);
547} 549}
548 550
549/** 551/**
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index b99e6c06ee67..78369305e069 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -368,8 +368,13 @@ int pm_clk_suspend(struct device *dev)
368 368
369 spin_lock_irqsave(&psd->lock, flags); 369 spin_lock_irqsave(&psd->lock, flags);
370 370
371 list_for_each_entry_reverse(ce, &psd->clock_list, node) 371 list_for_each_entry_reverse(ce, &psd->clock_list, node) {
372 clk_disable(ce->clk); 372 if (ce->status < PCE_STATUS_ERROR) {
373 if (ce->status == PCE_STATUS_ENABLED)
374 clk_disable(ce->clk);
375 ce->status = PCE_STATUS_ACQUIRED;
376 }
377 }
373 378
374 spin_unlock_irqrestore(&psd->lock, flags); 379 spin_unlock_irqrestore(&psd->lock, flags);
375 380
@@ -385,6 +390,7 @@ int pm_clk_resume(struct device *dev)
385 struct pm_subsys_data *psd = dev_to_psd(dev); 390 struct pm_subsys_data *psd = dev_to_psd(dev);
386 struct pm_clock_entry *ce; 391 struct pm_clock_entry *ce;
387 unsigned long flags; 392 unsigned long flags;
393 int ret;
388 394
389 dev_dbg(dev, "%s()\n", __func__); 395 dev_dbg(dev, "%s()\n", __func__);
390 396
@@ -394,8 +400,13 @@ int pm_clk_resume(struct device *dev)
394 400
395 spin_lock_irqsave(&psd->lock, flags); 401 spin_lock_irqsave(&psd->lock, flags);
396 402
397 list_for_each_entry(ce, &psd->clock_list, node) 403 list_for_each_entry(ce, &psd->clock_list, node) {
398 __pm_clk_enable(dev, ce->clk); 404 if (ce->status < PCE_STATUS_ERROR) {
405 ret = __pm_clk_enable(dev, ce->clk);
406 if (!ret)
407 ce->status = PCE_STATUS_ENABLED;
408 }
409 }
399 410
400 spin_unlock_irqrestore(&psd->lock, flags); 411 spin_unlock_irqrestore(&psd->lock, flags);
401 412
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index df2e5eeaeb05..b0f138806bbc 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -11,6 +11,8 @@
11#include <linux/export.h> 11#include <linux/export.h>
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/pm_clock.h> 13#include <linux/pm_clock.h>
14#include <linux/acpi.h>
15#include <linux/pm_domain.h>
14 16
15/** 17/**
16 * dev_pm_get_subsys_data - Create or refcount power.subsys_data for device. 18 * dev_pm_get_subsys_data - Create or refcount power.subsys_data for device.
@@ -82,3 +84,53 @@ int dev_pm_put_subsys_data(struct device *dev)
82 return ret; 84 return ret;
83} 85}
84EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data); 86EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
87
88/**
89 * dev_pm_domain_attach - Attach a device to its PM domain.
90 * @dev: Device to attach.
91 * @power_on: Used to indicate whether we should power on the device.
92 *
93 * The @dev may only be attached to a single PM domain. By iterating through
94 * the available alternatives we try to find a valid PM domain for the device.
95 * As attachment succeeds, the ->detach() callback in the struct dev_pm_domain
96 * should be assigned by the corresponding attach function.
97 *
98 * This function should typically be invoked from subsystem level code during
99 * the probe phase. Especially for those that holds devices which requires
100 * power management through PM domains.
101 *
102 * Callers must ensure proper synchronization of this function with power
103 * management callbacks.
104 *
105 * Returns 0 on successfully attached PM domain or negative error code.
106 */
107int dev_pm_domain_attach(struct device *dev, bool power_on)
108{
109 int ret;
110
111 ret = acpi_dev_pm_attach(dev, power_on);
112 if (ret)
113 ret = genpd_dev_pm_attach(dev);
114
115 return ret;
116}
117EXPORT_SYMBOL_GPL(dev_pm_domain_attach);
118
119/**
120 * dev_pm_domain_detach - Detach a device from its PM domain.
121 * @dev: Device to attach.
122 * @power_off: Used to indicate whether we should power off the device.
123 *
124 * This functions will reverse the actions from dev_pm_domain_attach() and thus
125 * try to detach the @dev from its PM domain. Typically it should be invoked
126 * from subsystem level code during the remove phase.
127 *
128 * Callers must ensure proper synchronization of this function with power
129 * management callbacks.
130 */
131void dev_pm_domain_detach(struct device *dev, bool power_off)
132{
133 if (dev->pm_domain && dev->pm_domain->detach)
134 dev->pm_domain->detach(dev, power_off);
135}
136EXPORT_SYMBOL_GPL(dev_pm_domain_detach);
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index eee55c1e5fde..40bc2f4072cc 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -8,6 +8,7 @@
8 8
9#include <linux/kernel.h> 9#include <linux/kernel.h>
10#include <linux/io.h> 10#include <linux/io.h>
11#include <linux/platform_device.h>
11#include <linux/pm_runtime.h> 12#include <linux/pm_runtime.h>
12#include <linux/pm_domain.h> 13#include <linux/pm_domain.h>
13#include <linux/pm_qos.h> 14#include <linux/pm_qos.h>
@@ -25,10 +26,6 @@
25 __routine = genpd->dev_ops.callback; \ 26 __routine = genpd->dev_ops.callback; \
26 if (__routine) { \ 27 if (__routine) { \
27 __ret = __routine(dev); \ 28 __ret = __routine(dev); \
28 } else { \
29 __routine = dev_gpd_data(dev)->ops.callback; \
30 if (__routine) \
31 __ret = __routine(dev); \
32 } \ 29 } \
33 __ret; \ 30 __ret; \
34}) 31})
@@ -70,8 +67,6 @@ static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name)
70 return genpd; 67 return genpd;
71} 68}
72 69
73#ifdef CONFIG_PM
74
75struct generic_pm_domain *dev_to_genpd(struct device *dev) 70struct generic_pm_domain *dev_to_genpd(struct device *dev)
76{ 71{
77 if (IS_ERR_OR_NULL(dev->pm_domain)) 72 if (IS_ERR_OR_NULL(dev->pm_domain))
@@ -147,13 +142,13 @@ static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
147{ 142{
148 s64 usecs64; 143 s64 usecs64;
149 144
150 if (!genpd->cpu_data) 145 if (!genpd->cpuidle_data)
151 return; 146 return;
152 147
153 usecs64 = genpd->power_on_latency_ns; 148 usecs64 = genpd->power_on_latency_ns;
154 do_div(usecs64, NSEC_PER_USEC); 149 do_div(usecs64, NSEC_PER_USEC);
155 usecs64 += genpd->cpu_data->saved_exit_latency; 150 usecs64 += genpd->cpuidle_data->saved_exit_latency;
156 genpd->cpu_data->idle_state->exit_latency = usecs64; 151 genpd->cpuidle_data->idle_state->exit_latency = usecs64;
157} 152}
158 153
159/** 154/**
@@ -193,9 +188,9 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
193 return 0; 188 return 0;
194 } 189 }
195 190
196 if (genpd->cpu_data) { 191 if (genpd->cpuidle_data) {
197 cpuidle_pause_and_lock(); 192 cpuidle_pause_and_lock();
198 genpd->cpu_data->idle_state->disabled = true; 193 genpd->cpuidle_data->idle_state->disabled = true;
199 cpuidle_resume_and_unlock(); 194 cpuidle_resume_and_unlock();
200 goto out; 195 goto out;
201 } 196 }
@@ -285,8 +280,6 @@ int pm_genpd_name_poweron(const char *domain_name)
285 return genpd ? pm_genpd_poweron(genpd) : -EINVAL; 280 return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
286} 281}
287 282
288#endif /* CONFIG_PM */
289
290#ifdef CONFIG_PM_RUNTIME 283#ifdef CONFIG_PM_RUNTIME
291 284
292static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd, 285static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
@@ -430,7 +423,7 @@ static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
430 * Queue up the execution of pm_genpd_poweroff() unless it's already been done 423 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
431 * before. 424 * before.
432 */ 425 */
433void genpd_queue_power_off_work(struct generic_pm_domain *genpd) 426static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
434{ 427{
435 queue_work(pm_wq, &genpd->power_off_work); 428 queue_work(pm_wq, &genpd->power_off_work);
436} 429}
@@ -520,17 +513,17 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
520 } 513 }
521 } 514 }
522 515
523 if (genpd->cpu_data) { 516 if (genpd->cpuidle_data) {
524 /* 517 /*
525 * If cpu_data is set, cpuidle should turn the domain off when 518 * If cpuidle_data is set, cpuidle should turn the domain off
526 * the CPU in it is idle. In that case we don't decrement the 519 * when the CPU in it is idle. In that case we don't decrement
527 * subdomain counts of the master domains, so that power is not 520 * the subdomain counts of the master domains, so that power is
528 * removed from the current domain prematurely as a result of 521 * not removed from the current domain prematurely as a result
529 * cutting off the masters' power. 522 * of cutting off the masters' power.
530 */ 523 */
531 genpd->status = GPD_STATE_POWER_OFF; 524 genpd->status = GPD_STATE_POWER_OFF;
532 cpuidle_pause_and_lock(); 525 cpuidle_pause_and_lock();
533 genpd->cpu_data->idle_state->disabled = false; 526 genpd->cpuidle_data->idle_state->disabled = false;
534 cpuidle_resume_and_unlock(); 527 cpuidle_resume_and_unlock();
535 goto out; 528 goto out;
536 } 529 }
@@ -619,8 +612,6 @@ static int pm_genpd_runtime_suspend(struct device *dev)
619 if (IS_ERR(genpd)) 612 if (IS_ERR(genpd))
620 return -EINVAL; 613 return -EINVAL;
621 614
622 might_sleep_if(!genpd->dev_irq_safe);
623
624 stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL; 615 stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
625 if (stop_ok && !stop_ok(dev)) 616 if (stop_ok && !stop_ok(dev))
626 return -EBUSY; 617 return -EBUSY;
@@ -665,8 +656,6 @@ static int pm_genpd_runtime_resume(struct device *dev)
665 if (IS_ERR(genpd)) 656 if (IS_ERR(genpd))
666 return -EINVAL; 657 return -EINVAL;
667 658
668 might_sleep_if(!genpd->dev_irq_safe);
669
670 /* If power.irq_safe, the PM domain is never powered off. */ 659 /* If power.irq_safe, the PM domain is never powered off. */
671 if (dev->power.irq_safe) 660 if (dev->power.irq_safe)
672 return genpd_start_dev_no_timing(genpd, dev); 661 return genpd_start_dev_no_timing(genpd, dev);
@@ -733,6 +722,13 @@ void pm_genpd_poweroff_unused(void)
733 mutex_unlock(&gpd_list_lock); 722 mutex_unlock(&gpd_list_lock);
734} 723}
735 724
725static int __init genpd_poweroff_unused(void)
726{
727 pm_genpd_poweroff_unused();
728 return 0;
729}
730late_initcall(genpd_poweroff_unused);
731
736#else 732#else
737 733
738static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb, 734static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
@@ -741,6 +737,9 @@ static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
741 return NOTIFY_DONE; 737 return NOTIFY_DONE;
742} 738}
743 739
740static inline void
741genpd_queue_power_off_work(struct generic_pm_domain *genpd) {}
742
744static inline void genpd_power_off_work_fn(struct work_struct *work) {} 743static inline void genpd_power_off_work_fn(struct work_struct *work) {}
745 744
746#define pm_genpd_runtime_suspend NULL 745#define pm_genpd_runtime_suspend NULL
@@ -774,46 +773,6 @@ static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
774 return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev); 773 return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
775} 774}
776 775
777static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev)
778{
779 return GENPD_DEV_CALLBACK(genpd, int, suspend, dev);
780}
781
782static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev)
783{
784 return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev);
785}
786
787static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev)
788{
789 return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev);
790}
791
792static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev)
793{
794 return GENPD_DEV_CALLBACK(genpd, int, resume, dev);
795}
796
797static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev)
798{
799 return GENPD_DEV_CALLBACK(genpd, int, freeze, dev);
800}
801
802static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev)
803{
804 return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev);
805}
806
807static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev)
808{
809 return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev);
810}
811
812static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
813{
814 return GENPD_DEV_CALLBACK(genpd, int, thaw, dev);
815}
816
817/** 776/**
818 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters. 777 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
819 * @genpd: PM domain to power off, if possible. 778 * @genpd: PM domain to power off, if possible.
@@ -995,7 +954,7 @@ static int pm_genpd_suspend(struct device *dev)
995 if (IS_ERR(genpd)) 954 if (IS_ERR(genpd))
996 return -EINVAL; 955 return -EINVAL;
997 956
998 return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev); 957 return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
999} 958}
1000 959
1001/** 960/**
@@ -1016,7 +975,7 @@ static int pm_genpd_suspend_late(struct device *dev)
1016 if (IS_ERR(genpd)) 975 if (IS_ERR(genpd))
1017 return -EINVAL; 976 return -EINVAL;
1018 977
1019 return genpd->suspend_power_off ? 0 : genpd_suspend_late(genpd, dev); 978 return genpd->suspend_power_off ? 0 : pm_generic_suspend_late(dev);
1020} 979}
1021 980
1022/** 981/**
@@ -1103,7 +1062,7 @@ static int pm_genpd_resume_early(struct device *dev)
1103 if (IS_ERR(genpd)) 1062 if (IS_ERR(genpd))
1104 return -EINVAL; 1063 return -EINVAL;
1105 1064
1106 return genpd->suspend_power_off ? 0 : genpd_resume_early(genpd, dev); 1065 return genpd->suspend_power_off ? 0 : pm_generic_resume_early(dev);
1107} 1066}
1108 1067
1109/** 1068/**
@@ -1124,7 +1083,7 @@ static int pm_genpd_resume(struct device *dev)
1124 if (IS_ERR(genpd)) 1083 if (IS_ERR(genpd))
1125 return -EINVAL; 1084 return -EINVAL;
1126 1085
1127 return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev); 1086 return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
1128} 1087}
1129 1088
1130/** 1089/**
@@ -1145,7 +1104,7 @@ static int pm_genpd_freeze(struct device *dev)
1145 if (IS_ERR(genpd)) 1104 if (IS_ERR(genpd))
1146 return -EINVAL; 1105 return -EINVAL;
1147 1106
1148 return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev); 1107 return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
1149} 1108}
1150 1109
1151/** 1110/**
@@ -1167,7 +1126,7 @@ static int pm_genpd_freeze_late(struct device *dev)
1167 if (IS_ERR(genpd)) 1126 if (IS_ERR(genpd))
1168 return -EINVAL; 1127 return -EINVAL;
1169 1128
1170 return genpd->suspend_power_off ? 0 : genpd_freeze_late(genpd, dev); 1129 return genpd->suspend_power_off ? 0 : pm_generic_freeze_late(dev);
1171} 1130}
1172 1131
1173/** 1132/**
@@ -1231,7 +1190,7 @@ static int pm_genpd_thaw_early(struct device *dev)
1231 if (IS_ERR(genpd)) 1190 if (IS_ERR(genpd))
1232 return -EINVAL; 1191 return -EINVAL;
1233 1192
1234 return genpd->suspend_power_off ? 0 : genpd_thaw_early(genpd, dev); 1193 return genpd->suspend_power_off ? 0 : pm_generic_thaw_early(dev);
1235} 1194}
1236 1195
1237/** 1196/**
@@ -1252,7 +1211,7 @@ static int pm_genpd_thaw(struct device *dev)
1252 if (IS_ERR(genpd)) 1211 if (IS_ERR(genpd))
1253 return -EINVAL; 1212 return -EINVAL;
1254 1213
1255 return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev); 1214 return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
1256} 1215}
1257 1216
1258/** 1217/**
@@ -1344,13 +1303,13 @@ static void pm_genpd_complete(struct device *dev)
1344} 1303}
1345 1304
1346/** 1305/**
1347 * pm_genpd_syscore_switch - Switch power during system core suspend or resume. 1306 * genpd_syscore_switch - Switch power during system core suspend or resume.
1348 * @dev: Device that normally is marked as "always on" to switch power for. 1307 * @dev: Device that normally is marked as "always on" to switch power for.
1349 * 1308 *
1350 * This routine may only be called during the system core (syscore) suspend or 1309 * This routine may only be called during the system core (syscore) suspend or
1351 * resume phase for devices whose "always on" flags are set. 1310 * resume phase for devices whose "always on" flags are set.
1352 */ 1311 */
1353void pm_genpd_syscore_switch(struct device *dev, bool suspend) 1312static void genpd_syscore_switch(struct device *dev, bool suspend)
1354{ 1313{
1355 struct generic_pm_domain *genpd; 1314 struct generic_pm_domain *genpd;
1356 1315
@@ -1366,7 +1325,18 @@ void pm_genpd_syscore_switch(struct device *dev, bool suspend)
1366 genpd->suspended_count--; 1325 genpd->suspended_count--;
1367 } 1326 }
1368} 1327}
1369EXPORT_SYMBOL_GPL(pm_genpd_syscore_switch); 1328
1329void pm_genpd_syscore_poweroff(struct device *dev)
1330{
1331 genpd_syscore_switch(dev, true);
1332}
1333EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);
1334
1335void pm_genpd_syscore_poweron(struct device *dev)
1336{
1337 genpd_syscore_switch(dev, false);
1338}
1339EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
1370 1340
1371#else 1341#else
1372 1342
@@ -1466,6 +1436,9 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1466 1436
1467 spin_unlock_irq(&dev->power.lock); 1437 spin_unlock_irq(&dev->power.lock);
1468 1438
1439 if (genpd->attach_dev)
1440 genpd->attach_dev(dev);
1441
1469 mutex_lock(&gpd_data->lock); 1442 mutex_lock(&gpd_data->lock);
1470 gpd_data->base.dev = dev; 1443 gpd_data->base.dev = dev;
1471 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); 1444 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
@@ -1484,39 +1457,6 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1484} 1457}
1485 1458
1486/** 1459/**
1487 * __pm_genpd_of_add_device - Add a device to an I/O PM domain.
1488 * @genpd_node: Device tree node pointer representing a PM domain to which the
1489 * the device is added to.
1490 * @dev: Device to be added.
1491 * @td: Set of PM QoS timing parameters to attach to the device.
1492 */
1493int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
1494 struct gpd_timing_data *td)
1495{
1496 struct generic_pm_domain *genpd = NULL, *gpd;
1497
1498 dev_dbg(dev, "%s()\n", __func__);
1499
1500 if (IS_ERR_OR_NULL(genpd_node) || IS_ERR_OR_NULL(dev))
1501 return -EINVAL;
1502
1503 mutex_lock(&gpd_list_lock);
1504 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
1505 if (gpd->of_node == genpd_node) {
1506 genpd = gpd;
1507 break;
1508 }
1509 }
1510 mutex_unlock(&gpd_list_lock);
1511
1512 if (!genpd)
1513 return -EINVAL;
1514
1515 return __pm_genpd_add_device(genpd, dev, td);
1516}
1517
1518
1519/**
1520 * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it. 1460 * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it.
1521 * @domain_name: Name of the PM domain to add the device to. 1461 * @domain_name: Name of the PM domain to add the device to.
1522 * @dev: Device to be added. 1462 * @dev: Device to be added.
@@ -1558,6 +1498,9 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1558 genpd->device_count--; 1498 genpd->device_count--;
1559 genpd->max_off_time_changed = true; 1499 genpd->max_off_time_changed = true;
1560 1500
1501 if (genpd->detach_dev)
1502 genpd->detach_dev(dev);
1503
1561 spin_lock_irq(&dev->power.lock); 1504 spin_lock_irq(&dev->power.lock);
1562 1505
1563 dev->pm_domain = NULL; 1506 dev->pm_domain = NULL;
@@ -1744,112 +1687,6 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1744} 1687}
1745 1688
1746/** 1689/**
1747 * pm_genpd_add_callbacks - Add PM domain callbacks to a given device.
1748 * @dev: Device to add the callbacks to.
1749 * @ops: Set of callbacks to add.
1750 * @td: Timing data to add to the device along with the callbacks (optional).
1751 *
1752 * Every call to this routine should be balanced with a call to
1753 * __pm_genpd_remove_callbacks() and they must not be nested.
1754 */
1755int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
1756 struct gpd_timing_data *td)
1757{
1758 struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
1759 int ret = 0;
1760
1761 if (!(dev && ops))
1762 return -EINVAL;
1763
1764 gpd_data_new = __pm_genpd_alloc_dev_data(dev);
1765 if (!gpd_data_new)
1766 return -ENOMEM;
1767
1768 pm_runtime_disable(dev);
1769 device_pm_lock();
1770
1771 ret = dev_pm_get_subsys_data(dev);
1772 if (ret)
1773 goto out;
1774
1775 spin_lock_irq(&dev->power.lock);
1776
1777 if (dev->power.subsys_data->domain_data) {
1778 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1779 } else {
1780 gpd_data = gpd_data_new;
1781 dev->power.subsys_data->domain_data = &gpd_data->base;
1782 }
1783 gpd_data->refcount++;
1784 gpd_data->ops = *ops;
1785 if (td)
1786 gpd_data->td = *td;
1787
1788 spin_unlock_irq(&dev->power.lock);
1789
1790 out:
1791 device_pm_unlock();
1792 pm_runtime_enable(dev);
1793
1794 if (gpd_data != gpd_data_new)
1795 __pm_genpd_free_dev_data(dev, gpd_data_new);
1796
1797 return ret;
1798}
1799EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
1800
1801/**
1802 * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
1803 * @dev: Device to remove the callbacks from.
1804 * @clear_td: If set, clear the device's timing data too.
1805 *
1806 * This routine can only be called after pm_genpd_add_callbacks().
1807 */
1808int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
1809{
1810 struct generic_pm_domain_data *gpd_data = NULL;
1811 bool remove = false;
1812 int ret = 0;
1813
1814 if (!(dev && dev->power.subsys_data))
1815 return -EINVAL;
1816
1817 pm_runtime_disable(dev);
1818 device_pm_lock();
1819
1820 spin_lock_irq(&dev->power.lock);
1821
1822 if (dev->power.subsys_data->domain_data) {
1823 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1824 gpd_data->ops = (struct gpd_dev_ops){ NULL };
1825 if (clear_td)
1826 gpd_data->td = (struct gpd_timing_data){ 0 };
1827
1828 if (--gpd_data->refcount == 0) {
1829 dev->power.subsys_data->domain_data = NULL;
1830 remove = true;
1831 }
1832 } else {
1833 ret = -EINVAL;
1834 }
1835
1836 spin_unlock_irq(&dev->power.lock);
1837
1838 device_pm_unlock();
1839 pm_runtime_enable(dev);
1840
1841 if (ret)
1842 return ret;
1843
1844 dev_pm_put_subsys_data(dev);
1845 if (remove)
1846 __pm_genpd_free_dev_data(dev, gpd_data);
1847
1848 return 0;
1849}
1850EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
1851
1852/**
1853 * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle. 1690 * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle.
1854 * @genpd: PM domain to be connected with cpuidle. 1691 * @genpd: PM domain to be connected with cpuidle.
1855 * @state: cpuidle state this domain can disable/enable. 1692 * @state: cpuidle state this domain can disable/enable.
@@ -1861,7 +1698,7 @@ EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
1861int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state) 1698int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
1862{ 1699{
1863 struct cpuidle_driver *cpuidle_drv; 1700 struct cpuidle_driver *cpuidle_drv;
1864 struct gpd_cpu_data *cpu_data; 1701 struct gpd_cpuidle_data *cpuidle_data;
1865 struct cpuidle_state *idle_state; 1702 struct cpuidle_state *idle_state;
1866 int ret = 0; 1703 int ret = 0;
1867 1704
@@ -1870,12 +1707,12 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
1870 1707
1871 genpd_acquire_lock(genpd); 1708 genpd_acquire_lock(genpd);
1872 1709
1873 if (genpd->cpu_data) { 1710 if (genpd->cpuidle_data) {
1874 ret = -EEXIST; 1711 ret = -EEXIST;
1875 goto out; 1712 goto out;
1876 } 1713 }
1877 cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL); 1714 cpuidle_data = kzalloc(sizeof(*cpuidle_data), GFP_KERNEL);
1878 if (!cpu_data) { 1715 if (!cpuidle_data) {
1879 ret = -ENOMEM; 1716 ret = -ENOMEM;
1880 goto out; 1717 goto out;
1881 } 1718 }
@@ -1893,9 +1730,9 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
1893 ret = -EAGAIN; 1730 ret = -EAGAIN;
1894 goto err; 1731 goto err;
1895 } 1732 }
1896 cpu_data->idle_state = idle_state; 1733 cpuidle_data->idle_state = idle_state;
1897 cpu_data->saved_exit_latency = idle_state->exit_latency; 1734 cpuidle_data->saved_exit_latency = idle_state->exit_latency;
1898 genpd->cpu_data = cpu_data; 1735 genpd->cpuidle_data = cpuidle_data;
1899 genpd_recalc_cpu_exit_latency(genpd); 1736 genpd_recalc_cpu_exit_latency(genpd);
1900 1737
1901 out: 1738 out:
@@ -1906,7 +1743,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
1906 cpuidle_driver_unref(); 1743 cpuidle_driver_unref();
1907 1744
1908 err_drv: 1745 err_drv:
1909 kfree(cpu_data); 1746 kfree(cpuidle_data);
1910 goto out; 1747 goto out;
1911} 1748}
1912 1749
@@ -1929,7 +1766,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
1929 */ 1766 */
1930int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd) 1767int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
1931{ 1768{
1932 struct gpd_cpu_data *cpu_data; 1769 struct gpd_cpuidle_data *cpuidle_data;
1933 struct cpuidle_state *idle_state; 1770 struct cpuidle_state *idle_state;
1934 int ret = 0; 1771 int ret = 0;
1935 1772
@@ -1938,20 +1775,20 @@ int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
1938 1775
1939 genpd_acquire_lock(genpd); 1776 genpd_acquire_lock(genpd);
1940 1777
1941 cpu_data = genpd->cpu_data; 1778 cpuidle_data = genpd->cpuidle_data;
1942 if (!cpu_data) { 1779 if (!cpuidle_data) {
1943 ret = -ENODEV; 1780 ret = -ENODEV;
1944 goto out; 1781 goto out;
1945 } 1782 }
1946 idle_state = cpu_data->idle_state; 1783 idle_state = cpuidle_data->idle_state;
1947 if (!idle_state->disabled) { 1784 if (!idle_state->disabled) {
1948 ret = -EAGAIN; 1785 ret = -EAGAIN;
1949 goto out; 1786 goto out;
1950 } 1787 }
1951 idle_state->exit_latency = cpu_data->saved_exit_latency; 1788 idle_state->exit_latency = cpuidle_data->saved_exit_latency;
1952 cpuidle_driver_unref(); 1789 cpuidle_driver_unref();
1953 genpd->cpu_data = NULL; 1790 genpd->cpuidle_data = NULL;
1954 kfree(cpu_data); 1791 kfree(cpuidle_data);
1955 1792
1956 out: 1793 out:
1957 genpd_release_lock(genpd); 1794 genpd_release_lock(genpd);
@@ -1970,17 +1807,13 @@ int pm_genpd_name_detach_cpuidle(const char *name)
1970/* Default device callbacks for generic PM domains. */ 1807/* Default device callbacks for generic PM domains. */
1971 1808
1972/** 1809/**
1973 * pm_genpd_default_save_state - Default "save device state" for PM domians. 1810 * pm_genpd_default_save_state - Default "save device state" for PM domains.
1974 * @dev: Device to handle. 1811 * @dev: Device to handle.
1975 */ 1812 */
1976static int pm_genpd_default_save_state(struct device *dev) 1813static int pm_genpd_default_save_state(struct device *dev)
1977{ 1814{
1978 int (*cb)(struct device *__dev); 1815 int (*cb)(struct device *__dev);
1979 1816
1980 cb = dev_gpd_data(dev)->ops.save_state;
1981 if (cb)
1982 return cb(dev);
1983
1984 if (dev->type && dev->type->pm) 1817 if (dev->type && dev->type->pm)
1985 cb = dev->type->pm->runtime_suspend; 1818 cb = dev->type->pm->runtime_suspend;
1986 else if (dev->class && dev->class->pm) 1819 else if (dev->class && dev->class->pm)
@@ -1997,17 +1830,13 @@ static int pm_genpd_default_save_state(struct device *dev)
1997} 1830}
1998 1831
1999/** 1832/**
2000 * pm_genpd_default_restore_state - Default PM domians "restore device state". 1833 * pm_genpd_default_restore_state - Default PM domains "restore device state".
2001 * @dev: Device to handle. 1834 * @dev: Device to handle.
2002 */ 1835 */
2003static int pm_genpd_default_restore_state(struct device *dev) 1836static int pm_genpd_default_restore_state(struct device *dev)
2004{ 1837{
2005 int (*cb)(struct device *__dev); 1838 int (*cb)(struct device *__dev);
2006 1839
2007 cb = dev_gpd_data(dev)->ops.restore_state;
2008 if (cb)
2009 return cb(dev);
2010
2011 if (dev->type && dev->type->pm) 1840 if (dev->type && dev->type->pm)
2012 cb = dev->type->pm->runtime_resume; 1841 cb = dev->type->pm->runtime_resume;
2013 else if (dev->class && dev->class->pm) 1842 else if (dev->class && dev->class->pm)
@@ -2023,109 +1852,6 @@ static int pm_genpd_default_restore_state(struct device *dev)
2023 return cb ? cb(dev) : 0; 1852 return cb ? cb(dev) : 0;
2024} 1853}
2025 1854
2026#ifdef CONFIG_PM_SLEEP
2027
2028/**
2029 * pm_genpd_default_suspend - Default "device suspend" for PM domians.
2030 * @dev: Device to handle.
2031 */
2032static int pm_genpd_default_suspend(struct device *dev)
2033{
2034 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend;
2035
2036 return cb ? cb(dev) : pm_generic_suspend(dev);
2037}
2038
2039/**
2040 * pm_genpd_default_suspend_late - Default "late device suspend" for PM domians.
2041 * @dev: Device to handle.
2042 */
2043static int pm_genpd_default_suspend_late(struct device *dev)
2044{
2045 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;
2046
2047 return cb ? cb(dev) : pm_generic_suspend_late(dev);
2048}
2049
2050/**
2051 * pm_genpd_default_resume_early - Default "early device resume" for PM domians.
2052 * @dev: Device to handle.
2053 */
2054static int pm_genpd_default_resume_early(struct device *dev)
2055{
2056 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;
2057
2058 return cb ? cb(dev) : pm_generic_resume_early(dev);
2059}
2060
2061/**
2062 * pm_genpd_default_resume - Default "device resume" for PM domians.
2063 * @dev: Device to handle.
2064 */
2065static int pm_genpd_default_resume(struct device *dev)
2066{
2067 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume;
2068
2069 return cb ? cb(dev) : pm_generic_resume(dev);
2070}
2071
2072/**
2073 * pm_genpd_default_freeze - Default "device freeze" for PM domians.
2074 * @dev: Device to handle.
2075 */
2076static int pm_genpd_default_freeze(struct device *dev)
2077{
2078 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;
2079
2080 return cb ? cb(dev) : pm_generic_freeze(dev);
2081}
2082
2083/**
2084 * pm_genpd_default_freeze_late - Default "late device freeze" for PM domians.
2085 * @dev: Device to handle.
2086 */
2087static int pm_genpd_default_freeze_late(struct device *dev)
2088{
2089 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
2090
2091 return cb ? cb(dev) : pm_generic_freeze_late(dev);
2092}
2093
2094/**
2095 * pm_genpd_default_thaw_early - Default "early device thaw" for PM domians.
2096 * @dev: Device to handle.
2097 */
2098static int pm_genpd_default_thaw_early(struct device *dev)
2099{
2100 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
2101
2102 return cb ? cb(dev) : pm_generic_thaw_early(dev);
2103}
2104
2105/**
2106 * pm_genpd_default_thaw - Default "device thaw" for PM domians.
2107 * @dev: Device to handle.
2108 */
2109static int pm_genpd_default_thaw(struct device *dev)
2110{
2111 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;
2112
2113 return cb ? cb(dev) : pm_generic_thaw(dev);
2114}
2115
2116#else /* !CONFIG_PM_SLEEP */
2117
2118#define pm_genpd_default_suspend NULL
2119#define pm_genpd_default_suspend_late NULL
2120#define pm_genpd_default_resume_early NULL
2121#define pm_genpd_default_resume NULL
2122#define pm_genpd_default_freeze NULL
2123#define pm_genpd_default_freeze_late NULL
2124#define pm_genpd_default_thaw_early NULL
2125#define pm_genpd_default_thaw NULL
2126
2127#endif /* !CONFIG_PM_SLEEP */
2128
2129/** 1855/**
2130 * pm_genpd_init - Initialize a generic I/O PM domain object. 1856 * pm_genpd_init - Initialize a generic I/O PM domain object.
2131 * @genpd: PM domain object to initialize. 1857 * @genpd: PM domain object to initialize.
@@ -2177,15 +1903,452 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
2177 genpd->domain.ops.complete = pm_genpd_complete; 1903 genpd->domain.ops.complete = pm_genpd_complete;
2178 genpd->dev_ops.save_state = pm_genpd_default_save_state; 1904 genpd->dev_ops.save_state = pm_genpd_default_save_state;
2179 genpd->dev_ops.restore_state = pm_genpd_default_restore_state; 1905 genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
2180 genpd->dev_ops.suspend = pm_genpd_default_suspend;
2181 genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late;
2182 genpd->dev_ops.resume_early = pm_genpd_default_resume_early;
2183 genpd->dev_ops.resume = pm_genpd_default_resume;
2184 genpd->dev_ops.freeze = pm_genpd_default_freeze;
2185 genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late;
2186 genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early;
2187 genpd->dev_ops.thaw = pm_genpd_default_thaw;
2188 mutex_lock(&gpd_list_lock); 1906 mutex_lock(&gpd_list_lock);
2189 list_add(&genpd->gpd_list_node, &gpd_list); 1907 list_add(&genpd->gpd_list_node, &gpd_list);
2190 mutex_unlock(&gpd_list_lock); 1908 mutex_unlock(&gpd_list_lock);
2191} 1909}
1910
1911#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
1912/*
1913 * Device Tree based PM domain providers.
1914 *
1915 * The code below implements generic device tree based PM domain providers that
1916 * bind device tree nodes with generic PM domains registered in the system.
1917 *
1918 * Any driver that registers generic PM domains and needs to support binding of
1919 * devices to these domains is supposed to register a PM domain provider, which
1920 * maps a PM domain specifier retrieved from the device tree to a PM domain.
1921 *
1922 * Two simple mapping functions have been provided for convenience:
1923 * - __of_genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
1924 * - __of_genpd_xlate_onecell() for mapping of multiple PM domains per node by
1925 * index.
1926 */
1927
1928/**
1929 * struct of_genpd_provider - PM domain provider registration structure
1930 * @link: Entry in global list of PM domain providers
1931 * @node: Pointer to device tree node of PM domain provider
1932 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
1933 * into a PM domain.
1934 * @data: context pointer to be passed into @xlate callback
1935 */
1936struct of_genpd_provider {
1937 struct list_head link;
1938 struct device_node *node;
1939 genpd_xlate_t xlate;
1940 void *data;
1941};
1942
1943/* List of registered PM domain providers. */
1944static LIST_HEAD(of_genpd_providers);
1945/* Mutex to protect the list above. */
1946static DEFINE_MUTEX(of_genpd_mutex);
1947
1948/**
1949 * __of_genpd_xlate_simple() - Xlate function for direct node-domain mapping
1950 * @genpdspec: OF phandle args to map into a PM domain
1951 * @data: xlate function private data - pointer to struct generic_pm_domain
1952 *
1953 * This is a generic xlate function that can be used to model PM domains that
1954 * have their own device tree nodes. The private data of xlate function needs
1955 * to be a valid pointer to struct generic_pm_domain.
1956 */
1957struct generic_pm_domain *__of_genpd_xlate_simple(
1958 struct of_phandle_args *genpdspec,
1959 void *data)
1960{
1961 if (genpdspec->args_count != 0)
1962 return ERR_PTR(-EINVAL);
1963 return data;
1964}
1965EXPORT_SYMBOL_GPL(__of_genpd_xlate_simple);
1966
1967/**
1968 * __of_genpd_xlate_onecell() - Xlate function using a single index.
1969 * @genpdspec: OF phandle args to map into a PM domain
1970 * @data: xlate function private data - pointer to struct genpd_onecell_data
1971 *
1972 * This is a generic xlate function that can be used to model simple PM domain
1973 * controllers that have one device tree node and provide multiple PM domains.
1974 * A single cell is used as an index into an array of PM domains specified in
1975 * the genpd_onecell_data struct when registering the provider.
1976 */
1977struct generic_pm_domain *__of_genpd_xlate_onecell(
1978 struct of_phandle_args *genpdspec,
1979 void *data)
1980{
1981 struct genpd_onecell_data *genpd_data = data;
1982 unsigned int idx = genpdspec->args[0];
1983
1984 if (genpdspec->args_count != 1)
1985 return ERR_PTR(-EINVAL);
1986
1987 if (idx >= genpd_data->num_domains) {
1988 pr_err("%s: invalid domain index %u\n", __func__, idx);
1989 return ERR_PTR(-EINVAL);
1990 }
1991
1992 if (!genpd_data->domains[idx])
1993 return ERR_PTR(-ENOENT);
1994
1995 return genpd_data->domains[idx];
1996}
1997EXPORT_SYMBOL_GPL(__of_genpd_xlate_onecell);
1998
1999/**
2000 * __of_genpd_add_provider() - Register a PM domain provider for a node
2001 * @np: Device node pointer associated with the PM domain provider.
2002 * @xlate: Callback for decoding PM domain from phandle arguments.
2003 * @data: Context pointer for @xlate callback.
2004 */
2005int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
2006 void *data)
2007{
2008 struct of_genpd_provider *cp;
2009
2010 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2011 if (!cp)
2012 return -ENOMEM;
2013
2014 cp->node = of_node_get(np);
2015 cp->data = data;
2016 cp->xlate = xlate;
2017
2018 mutex_lock(&of_genpd_mutex);
2019 list_add(&cp->link, &of_genpd_providers);
2020 mutex_unlock(&of_genpd_mutex);
2021 pr_debug("Added domain provider from %s\n", np->full_name);
2022
2023 return 0;
2024}
2025EXPORT_SYMBOL_GPL(__of_genpd_add_provider);
2026
2027/**
2028 * of_genpd_del_provider() - Remove a previously registered PM domain provider
2029 * @np: Device node pointer associated with the PM domain provider
2030 */
2031void of_genpd_del_provider(struct device_node *np)
2032{
2033 struct of_genpd_provider *cp;
2034
2035 mutex_lock(&of_genpd_mutex);
2036 list_for_each_entry(cp, &of_genpd_providers, link) {
2037 if (cp->node == np) {
2038 list_del(&cp->link);
2039 of_node_put(cp->node);
2040 kfree(cp);
2041 break;
2042 }
2043 }
2044 mutex_unlock(&of_genpd_mutex);
2045}
2046EXPORT_SYMBOL_GPL(of_genpd_del_provider);
2047
2048/**
2049 * of_genpd_get_from_provider() - Look-up PM domain
2050 * @genpdspec: OF phandle args to use for look-up
2051 *
2052 * Looks for a PM domain provider under the node specified by @genpdspec and if
2053 * found, uses xlate function of the provider to map phandle args to a PM
2054 * domain.
2055 *
2056 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
2057 * on failure.
2058 */
2059static struct generic_pm_domain *of_genpd_get_from_provider(
2060 struct of_phandle_args *genpdspec)
2061{
2062 struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
2063 struct of_genpd_provider *provider;
2064
2065 mutex_lock(&of_genpd_mutex);
2066
2067 /* Check if we have such a provider in our array */
2068 list_for_each_entry(provider, &of_genpd_providers, link) {
2069 if (provider->node == genpdspec->np)
2070 genpd = provider->xlate(genpdspec, provider->data);
2071 if (!IS_ERR(genpd))
2072 break;
2073 }
2074
2075 mutex_unlock(&of_genpd_mutex);
2076
2077 return genpd;
2078}
2079
2080/**
2081 * genpd_dev_pm_detach - Detach a device from its PM domain.
2082 * @dev: Device to attach.
2083 * @power_off: Currently not used
2084 *
2085 * Try to locate a corresponding generic PM domain, which the device was
2086 * attached to previously. If such is found, the device is detached from it.
2087 */
2088static void genpd_dev_pm_detach(struct device *dev, bool power_off)
2089{
2090 struct generic_pm_domain *pd = NULL, *gpd;
2091 int ret = 0;
2092
2093 if (!dev->pm_domain)
2094 return;
2095
2096 mutex_lock(&gpd_list_lock);
2097 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2098 if (&gpd->domain == dev->pm_domain) {
2099 pd = gpd;
2100 break;
2101 }
2102 }
2103 mutex_unlock(&gpd_list_lock);
2104
2105 if (!pd)
2106 return;
2107
2108 dev_dbg(dev, "removing from PM domain %s\n", pd->name);
2109
2110 while (1) {
2111 ret = pm_genpd_remove_device(pd, dev);
2112 if (ret != -EAGAIN)
2113 break;
2114 cond_resched();
2115 }
2116
2117 if (ret < 0) {
2118 dev_err(dev, "failed to remove from PM domain %s: %d",
2119 pd->name, ret);
2120 return;
2121 }
2122
2123 /* Check if PM domain can be powered off after removing this device. */
2124 genpd_queue_power_off_work(pd);
2125}
2126
2127/**
2128 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
2129 * @dev: Device to attach.
2130 *
2131 * Parse device's OF node to find a PM domain specifier. If such is found,
2132 * attaches the device to retrieved pm_domain ops.
2133 *
2134 * Both generic and legacy Samsung-specific DT bindings are supported to keep
2135 * backwards compatibility with existing DTBs.
2136 *
2137 * Returns 0 on successfully attached PM domain or negative error code.
2138 */
2139int genpd_dev_pm_attach(struct device *dev)
2140{
2141 struct of_phandle_args pd_args;
2142 struct generic_pm_domain *pd;
2143 int ret;
2144
2145 if (!dev->of_node)
2146 return -ENODEV;
2147
2148 if (dev->pm_domain)
2149 return -EEXIST;
2150
2151 ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
2152 "#power-domain-cells", 0, &pd_args);
2153 if (ret < 0) {
2154 if (ret != -ENOENT)
2155 return ret;
2156
2157 /*
2158 * Try legacy Samsung-specific bindings
2159 * (for backwards compatibility of DT ABI)
2160 */
2161 pd_args.args_count = 0;
2162 pd_args.np = of_parse_phandle(dev->of_node,
2163 "samsung,power-domain", 0);
2164 if (!pd_args.np)
2165 return -ENOENT;
2166 }
2167
2168 pd = of_genpd_get_from_provider(&pd_args);
2169 if (IS_ERR(pd)) {
2170 dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
2171 __func__, PTR_ERR(pd));
2172 of_node_put(dev->of_node);
2173 return PTR_ERR(pd);
2174 }
2175
2176 dev_dbg(dev, "adding to PM domain %s\n", pd->name);
2177
2178 while (1) {
2179 ret = pm_genpd_add_device(pd, dev);
2180 if (ret != -EAGAIN)
2181 break;
2182 cond_resched();
2183 }
2184
2185 if (ret < 0) {
2186 dev_err(dev, "failed to add to PM domain %s: %d",
2187 pd->name, ret);
2188 of_node_put(dev->of_node);
2189 return ret;
2190 }
2191
2192 dev->pm_domain->detach = genpd_dev_pm_detach;
2193
2194 return 0;
2195}
2196EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
2197#endif
2198
2199
2200/*** debugfs support ***/
2201
2202#ifdef CONFIG_PM_ADVANCED_DEBUG
2203#include <linux/pm.h>
2204#include <linux/device.h>
2205#include <linux/debugfs.h>
2206#include <linux/seq_file.h>
2207#include <linux/init.h>
2208#include <linux/kobject.h>
2209static struct dentry *pm_genpd_debugfs_dir;
2210
2211/*
2212 * TODO: This function is a slightly modified version of rtpm_status_show
2213 * from sysfs.c, but dependencies between PM_GENERIC_DOMAINS and PM_RUNTIME
2214 * are too loose to generalize it.
2215 */
2216#ifdef CONFIG_PM_RUNTIME
2217static void rtpm_status_str(struct seq_file *s, struct device *dev)
2218{
2219 static const char * const status_lookup[] = {
2220 [RPM_ACTIVE] = "active",
2221 [RPM_RESUMING] = "resuming",
2222 [RPM_SUSPENDED] = "suspended",
2223 [RPM_SUSPENDING] = "suspending"
2224 };
2225 const char *p = "";
2226
2227 if (dev->power.runtime_error)
2228 p = "error";
2229 else if (dev->power.disable_depth)
2230 p = "unsupported";
2231 else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
2232 p = status_lookup[dev->power.runtime_status];
2233 else
2234 WARN_ON(1);
2235
2236 seq_puts(s, p);
2237}
2238#else
/* Without CONFIG_PM_RUNTIME every device is reported as "active". */
static void rtpm_status_str(struct seq_file *s, struct device *dev)
{
	seq_puts(s, "active");
}
2243#endif
2244
2245static int pm_genpd_summary_one(struct seq_file *s,
2246 struct generic_pm_domain *gpd)
2247{
2248 static const char * const status_lookup[] = {
2249 [GPD_STATE_ACTIVE] = "on",
2250 [GPD_STATE_WAIT_MASTER] = "wait-master",
2251 [GPD_STATE_BUSY] = "busy",
2252 [GPD_STATE_REPEAT] = "off-in-progress",
2253 [GPD_STATE_POWER_OFF] = "off"
2254 };
2255 struct pm_domain_data *pm_data;
2256 const char *kobj_path;
2257 struct gpd_link *link;
2258 int ret;
2259
2260 ret = mutex_lock_interruptible(&gpd->lock);
2261 if (ret)
2262 return -ERESTARTSYS;
2263
2264 if (WARN_ON(gpd->status >= ARRAY_SIZE(status_lookup)))
2265 goto exit;
2266 seq_printf(s, "%-30s %-15s ", gpd->name, status_lookup[gpd->status]);
2267
2268 /*
2269 * Modifications on the list require holding locks on both
2270 * master and slave, so we are safe.
2271 * Also gpd->name is immutable.
2272 */
2273 list_for_each_entry(link, &gpd->master_links, master_node) {
2274 seq_printf(s, "%s", link->slave->name);
2275 if (!list_is_last(&link->master_node, &gpd->master_links))
2276 seq_puts(s, ", ");
2277 }
2278
2279 list_for_each_entry(pm_data, &gpd->dev_list, list_node) {
2280 kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL);
2281 if (kobj_path == NULL)
2282 continue;
2283
2284 seq_printf(s, "\n %-50s ", kobj_path);
2285 rtpm_status_str(s, pm_data->dev);
2286 kfree(kobj_path);
2287 }
2288
2289 seq_puts(s, "\n");
2290exit:
2291 mutex_unlock(&gpd->lock);
2292
2293 return 0;
2294}
2295
2296static int pm_genpd_summary_show(struct seq_file *s, void *data)
2297{
2298 struct generic_pm_domain *gpd;
2299 int ret = 0;
2300
2301 seq_puts(s, " domain status slaves\n");
2302 seq_puts(s, " /device runtime status\n");
2303 seq_puts(s, "----------------------------------------------------------------------\n");
2304
2305 ret = mutex_lock_interruptible(&gpd_list_lock);
2306 if (ret)
2307 return -ERESTARTSYS;
2308
2309 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2310 ret = pm_genpd_summary_one(s, gpd);
2311 if (ret)
2312 break;
2313 }
2314 mutex_unlock(&gpd_list_lock);
2315
2316 return ret;
2317}
2318
2319static int pm_genpd_summary_open(struct inode *inode, struct file *file)
2320{
2321 return single_open(file, pm_genpd_summary_show, NULL);
2322}
2323
2324static const struct file_operations pm_genpd_summary_fops = {
2325 .open = pm_genpd_summary_open,
2326 .read = seq_read,
2327 .llseek = seq_lseek,
2328 .release = single_release,
2329};
2330
2331static int __init pm_genpd_debug_init(void)
2332{
2333 struct dentry *d;
2334
2335 pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
2336
2337 if (!pm_genpd_debugfs_dir)
2338 return -ENOMEM;
2339
2340 d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
2341 pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops);
2342 if (!d)
2343 return -ENOMEM;
2344
2345 return 0;
2346}
2347late_initcall(pm_genpd_debug_init);
2348
2349static void __exit pm_genpd_debug_exit(void)
2350{
2351 debugfs_remove_recursive(pm_genpd_debugfs_dir);
2352}
2353__exitcall(pm_genpd_debug_exit);
2354#endif /* CONFIG_PM_ADVANCED_DEBUG */
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index a089e3bcdfbc..d88a62e104d4 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -42,7 +42,7 @@ static int dev_update_qos_constraint(struct device *dev, void *data)
42 * default_stop_ok - Default PM domain governor routine for stopping devices. 42 * default_stop_ok - Default PM domain governor routine for stopping devices.
43 * @dev: Device to check. 43 * @dev: Device to check.
44 */ 44 */
45bool default_stop_ok(struct device *dev) 45static bool default_stop_ok(struct device *dev)
46{ 46{
47 struct gpd_timing_data *td = &dev_gpd_data(dev)->td; 47 struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
48 unsigned long flags; 48 unsigned long flags;
@@ -229,10 +229,7 @@ static bool always_on_power_down_ok(struct dev_pm_domain *domain)
229 229
230#else /* !CONFIG_PM_RUNTIME */ 230#else /* !CONFIG_PM_RUNTIME */
231 231
232bool default_stop_ok(struct device *dev) 232static inline bool default_stop_ok(struct device *dev) { return false; }
233{
234 return false;
235}
236 233
237#define default_power_down_ok NULL 234#define default_power_down_ok NULL
238#define always_on_power_down_ok NULL 235#define always_on_power_down_ok NULL
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index b67d9aef9fe4..44973196d3fd 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -540,7 +540,7 @@ static void async_resume_noirq(void *data, async_cookie_t cookie)
540 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and 540 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
541 * enable device drivers to receive interrupts. 541 * enable device drivers to receive interrupts.
542 */ 542 */
543static void dpm_resume_noirq(pm_message_t state) 543void dpm_resume_noirq(pm_message_t state)
544{ 544{
545 struct device *dev; 545 struct device *dev;
546 ktime_t starttime = ktime_get(); 546 ktime_t starttime = ktime_get();
@@ -662,7 +662,7 @@ static void async_resume_early(void *data, async_cookie_t cookie)
662 * dpm_resume_early - Execute "early resume" callbacks for all devices. 662 * dpm_resume_early - Execute "early resume" callbacks for all devices.
663 * @state: PM transition of the system being carried out. 663 * @state: PM transition of the system being carried out.
664 */ 664 */
665static void dpm_resume_early(pm_message_t state) 665void dpm_resume_early(pm_message_t state)
666{ 666{
667 struct device *dev; 667 struct device *dev;
668 ktime_t starttime = ktime_get(); 668 ktime_t starttime = ktime_get();
@@ -1093,7 +1093,7 @@ static int device_suspend_noirq(struct device *dev)
1093 * Prevent device drivers from receiving interrupts and call the "noirq" suspend 1093 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
1094 * handlers for all non-sysdev devices. 1094 * handlers for all non-sysdev devices.
1095 */ 1095 */
1096static int dpm_suspend_noirq(pm_message_t state) 1096int dpm_suspend_noirq(pm_message_t state)
1097{ 1097{
1098 ktime_t starttime = ktime_get(); 1098 ktime_t starttime = ktime_get();
1099 int error = 0; 1099 int error = 0;
@@ -1232,7 +1232,7 @@ static int device_suspend_late(struct device *dev)
1232 * dpm_suspend_late - Execute "late suspend" callbacks for all devices. 1232 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1233 * @state: PM transition of the system being carried out. 1233 * @state: PM transition of the system being carried out.
1234 */ 1234 */
1235static int dpm_suspend_late(pm_message_t state) 1235int dpm_suspend_late(pm_message_t state)
1236{ 1236{
1237 ktime_t starttime = ktime_get(); 1237 ktime_t starttime = ktime_get();
1238 int error = 0; 1238 int error = 0;
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 95b181d1ca6d..a9d26ed11bf4 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -92,9 +92,6 @@
92 * wakeup_count - Report the number of wakeup events related to the device 92 * wakeup_count - Report the number of wakeup events related to the device
93 */ 93 */
94 94
95static const char enabled[] = "enabled";
96static const char disabled[] = "disabled";
97
98const char power_group_name[] = "power"; 95const char power_group_name[] = "power";
99EXPORT_SYMBOL_GPL(power_group_name); 96EXPORT_SYMBOL_GPL(power_group_name);
100 97
@@ -336,11 +333,14 @@ static DEVICE_ATTR(pm_qos_remote_wakeup, 0644,
336#endif /* CONFIG_PM_RUNTIME */ 333#endif /* CONFIG_PM_RUNTIME */
337 334
338#ifdef CONFIG_PM_SLEEP 335#ifdef CONFIG_PM_SLEEP
336static const char _enabled[] = "enabled";
337static const char _disabled[] = "disabled";
338
339static ssize_t 339static ssize_t
340wake_show(struct device * dev, struct device_attribute *attr, char * buf) 340wake_show(struct device * dev, struct device_attribute *attr, char * buf)
341{ 341{
342 return sprintf(buf, "%s\n", device_can_wakeup(dev) 342 return sprintf(buf, "%s\n", device_can_wakeup(dev)
343 ? (device_may_wakeup(dev) ? enabled : disabled) 343 ? (device_may_wakeup(dev) ? _enabled : _disabled)
344 : ""); 344 : "");
345} 345}
346 346
@@ -357,11 +357,11 @@ wake_store(struct device * dev, struct device_attribute *attr,
357 cp = memchr(buf, '\n', n); 357 cp = memchr(buf, '\n', n);
358 if (cp) 358 if (cp)
359 len = cp - buf; 359 len = cp - buf;
360 if (len == sizeof enabled - 1 360 if (len == sizeof _enabled - 1
361 && strncmp(buf, enabled, sizeof enabled - 1) == 0) 361 && strncmp(buf, _enabled, sizeof _enabled - 1) == 0)
362 device_set_wakeup_enable(dev, 1); 362 device_set_wakeup_enable(dev, 1);
363 else if (len == sizeof disabled - 1 363 else if (len == sizeof _disabled - 1
364 && strncmp(buf, disabled, sizeof disabled - 1) == 0) 364 && strncmp(buf, _disabled, sizeof _disabled - 1) == 0)
365 device_set_wakeup_enable(dev, 0); 365 device_set_wakeup_enable(dev, 0);
366 else 366 else
367 return -EINVAL; 367 return -EINVAL;
@@ -570,7 +570,8 @@ static ssize_t async_show(struct device *dev, struct device_attribute *attr,
570 char *buf) 570 char *buf)
571{ 571{
572 return sprintf(buf, "%s\n", 572 return sprintf(buf, "%s\n",
573 device_async_suspend_enabled(dev) ? enabled : disabled); 573 device_async_suspend_enabled(dev) ?
574 _enabled : _disabled);
574} 575}
575 576
576static ssize_t async_store(struct device *dev, struct device_attribute *attr, 577static ssize_t async_store(struct device *dev, struct device_attribute *attr,
@@ -582,9 +583,10 @@ static ssize_t async_store(struct device *dev, struct device_attribute *attr,
582 cp = memchr(buf, '\n', n); 583 cp = memchr(buf, '\n', n);
583 if (cp) 584 if (cp)
584 len = cp - buf; 585 len = cp - buf;
585 if (len == sizeof enabled - 1 && strncmp(buf, enabled, len) == 0) 586 if (len == sizeof _enabled - 1 && strncmp(buf, _enabled, len) == 0)
586 device_enable_async_suspend(dev); 587 device_enable_async_suspend(dev);
587 else if (len == sizeof disabled - 1 && strncmp(buf, disabled, len) == 0) 588 else if (len == sizeof _disabled - 1 &&
589 strncmp(buf, _disabled, len) == 0)
588 device_disable_async_suspend(dev); 590 device_disable_async_suspend(dev);
589 else 591 else
590 return -EINVAL; 592 return -EINVAL;
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index eb1bd2ecad8b..c2744b30d5d9 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -24,6 +24,9 @@
24 */ 24 */
25bool events_check_enabled __read_mostly; 25bool events_check_enabled __read_mostly;
26 26
27/* If set and the system is suspending, terminate the suspend. */
28static bool pm_abort_suspend __read_mostly;
29
27/* 30/*
28 * Combined counters of registered wakeup events and wakeup events in progress. 31 * Combined counters of registered wakeup events and wakeup events in progress.
29 * They need to be modified together atomically, so it's better to use one 32 * They need to be modified together atomically, so it's better to use one
@@ -719,7 +722,18 @@ bool pm_wakeup_pending(void)
719 pm_print_active_wakeup_sources(); 722 pm_print_active_wakeup_sources();
720 } 723 }
721 724
722 return ret; 725 return ret || pm_abort_suspend;
726}
727
728void pm_system_wakeup(void)
729{
730 pm_abort_suspend = true;
731 freeze_wake();
732}
733
734void pm_wakeup_clear(void)
735{
736 pm_abort_suspend = false;
723} 737}
724 738
725/** 739/**
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
index dbb8350ea8dc..8d98a329f6ea 100644
--- a/drivers/base/syscore.c
+++ b/drivers/base/syscore.c
@@ -9,7 +9,7 @@
9#include <linux/syscore_ops.h> 9#include <linux/syscore_ops.h>
10#include <linux/mutex.h> 10#include <linux/mutex.h>
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/interrupt.h> 12#include <linux/suspend.h>
13#include <trace/events/power.h> 13#include <trace/events/power.h>
14 14
15static LIST_HEAD(syscore_ops_list); 15static LIST_HEAD(syscore_ops_list);
@@ -54,9 +54,8 @@ int syscore_suspend(void)
54 pr_debug("Checking wakeup interrupts\n"); 54 pr_debug("Checking wakeup interrupts\n");
55 55
56 /* Return error code if there are any wakeup interrupts pending. */ 56 /* Return error code if there are any wakeup interrupts pending. */
57 ret = check_wakeup_irqs(); 57 if (pm_wakeup_pending())
58 if (ret) 58 return -EBUSY;
59 return ret;
60 59
61 WARN_ONCE(!irqs_disabled(), 60 WARN_ONCE(!irqs_disabled(),
62 "Interrupts enabled before system core suspend.\n"); 61 "Interrupts enabled before system core suspend.\n");