Diffstat (limited to 'drivers/base')
 drivers/base/firmware_class.c        |   4
 drivers/base/platform.c              | 115
 drivers/base/power/Makefile          |   2
 drivers/base/power/domain.c          | 539
 drivers/base/power/domain_governor.c | 156
 drivers/base/power/generic_ops.c     |  91
 drivers/base/power/main.c            | 375
 drivers/base/power/qos.c             |  49
 drivers/base/power/runtime.c         | 157
 9 files changed, 912 insertions(+), 576 deletions(-)
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 3719c94be19c..26ab358dac62 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -534,6 +534,8 @@ static int _request_firmware(const struct firmware **firmware_p,
 		return 0;
 	}
 
+	read_lock_usermodehelper();
+
 	if (WARN_ON(usermodehelper_is_disabled())) {
 		dev_err(device, "firmware: %s will not be loaded\n", name);
 		retval = -EBUSY;
@@ -572,6 +574,8 @@ static int _request_firmware(const struct firmware **firmware_p,
 	fw_destroy_instance(fw_priv);
 
 out:
+	read_unlock_usermodehelper();
+
 	if (retval) {
 		release_firmware(firmware);
 		*firmware_p = NULL;
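
The two hunks above bracket the whole firmware load with the usermodehelper read lock, so usermode helpers can no longer be disabled between the usermodehelper_is_disabled() check and the actual load. A minimal sketch of the resulting pattern, with do_load() standing in (hypothetically) for everything _request_firmware() does in between:

    int retval;

    read_lock_usermodehelper();
    if (WARN_ON(usermodehelper_is_disabled())) {
            retval = -EBUSY;        /* the check stays valid while the lock is held */
            goto out;
    }
    retval = do_load();             /* hypothetical: the body of _request_firmware() */
 out:
    read_unlock_usermodehelper();
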
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index a7c06374062e..f0c605e99ade 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -700,25 +700,6 @@ static int platform_legacy_resume(struct device *dev)
 	return ret;
 }
 
-int platform_pm_prepare(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (drv && drv->pm && drv->pm->prepare)
-		ret = drv->pm->prepare(dev);
-
-	return ret;
-}
-
-void platform_pm_complete(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-
-	if (drv && drv->pm && drv->pm->complete)
-		drv->pm->complete(dev);
-}
-
 #endif /* CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_SUSPEND
@@ -741,22 +722,6 @@ int platform_pm_suspend(struct device *dev)
 	return ret;
 }
 
-int platform_pm_suspend_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->suspend_noirq)
-			ret = drv->pm->suspend_noirq(dev);
-	}
-
-	return ret;
-}
-
 int platform_pm_resume(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
@@ -775,22 +740,6 @@ int platform_pm_resume(struct device *dev)
 	return ret;
 }
 
-int platform_pm_resume_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->resume_noirq)
-			ret = drv->pm->resume_noirq(dev);
-	}
-
-	return ret;
-}
-
 #endif /* CONFIG_SUSPEND */
 
 #ifdef CONFIG_HIBERNATE_CALLBACKS
@@ -813,22 +762,6 @@ int platform_pm_freeze(struct device *dev)
 	return ret;
 }
 
-int platform_pm_freeze_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->freeze_noirq)
-			ret = drv->pm->freeze_noirq(dev);
-	}
-
-	return ret;
-}
-
 int platform_pm_thaw(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
@@ -847,22 +780,6 @@ int platform_pm_thaw(struct device *dev)
 	return ret;
 }
 
-int platform_pm_thaw_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->thaw_noirq)
-			ret = drv->pm->thaw_noirq(dev);
-	}
-
-	return ret;
-}
-
 int platform_pm_poweroff(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
@@ -881,22 +798,6 @@ int platform_pm_poweroff(struct device *dev)
 	return ret;
 }
 
-int platform_pm_poweroff_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->poweroff_noirq)
-			ret = drv->pm->poweroff_noirq(dev);
-	}
-
-	return ret;
-}
-
 int platform_pm_restore(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
@@ -915,22 +816,6 @@ int platform_pm_restore(struct device *dev)
 	return ret;
 }
 
-int platform_pm_restore_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->restore_noirq)
-			ret = drv->pm->restore_noirq(dev);
-	}
-
-	return ret;
-}
-
 #endif /* CONFIG_HIBERNATE_CALLBACKS */
 
 static const struct dev_pm_ops platform_dev_pm_ops = {
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 81676dd17900..2e58ebb1f6c0 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -3,7 +3,7 @@ obj-$(CONFIG_PM_SLEEP)	+= main.o wakeup.o
 obj-$(CONFIG_PM_RUNTIME)	+= runtime.o
 obj-$(CONFIG_PM_TRACE_RTC)	+= trace.o
 obj-$(CONFIG_PM_OPP)	+= opp.o
-obj-$(CONFIG_PM_GENERIC_DOMAINS)	+= domain.o
+obj-$(CONFIG_PM_GENERIC_DOMAINS)	+= domain.o domain_governor.o
 obj-$(CONFIG_HAVE_CLK)	+= clock_ops.o
 
 ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 6790cf7eba5a..92e6a9048065 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -15,13 +15,44 @@
 #include <linux/err.h>
 #include <linux/sched.h>
 #include <linux/suspend.h>
+#include <linux/export.h>
+
+#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
+({								\
+	type (*__routine)(struct device *__d);			\
+	type __ret = (type)0;					\
+								\
+	__routine = genpd->dev_ops.callback;			\
+	if (__routine) {					\
+		__ret = __routine(dev);				\
+	} else {						\
+		__routine = dev_gpd_data(dev)->ops.callback;	\
+		if (__routine)					\
+			__ret = __routine(dev);			\
+	}							\
+	__ret;							\
+})
+
+#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name)	\
+({										\
+	ktime_t __start = ktime_get();						\
+	type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev);		\
+	s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start));		\
+	struct generic_pm_domain_data *__gpd_data = dev_gpd_data(dev);		\
+	if (__elapsed > __gpd_data->td.field) {					\
+		__gpd_data->td.field = __elapsed;				\
+		dev_warn(dev, name " latency exceeded, new value %lld ns\n",	\
+			__elapsed);						\
+	}									\
+	__retval;								\
+})
 
 static LIST_HEAD(gpd_list);
 static DEFINE_MUTEX(gpd_list_lock);
 
 #ifdef CONFIG_PM
 
-static struct generic_pm_domain *dev_to_genpd(struct device *dev)
+struct generic_pm_domain *dev_to_genpd(struct device *dev)
 {
 	if (IS_ERR_OR_NULL(dev->pm_domain))
 		return ERR_PTR(-EINVAL);
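
For orientation, GENPD_DEV_CALLBACK() prefers the domain-wide operation in genpd->dev_ops and only falls back to the per-device operation attached to the device's PM domain data. Hand-expanded for the "stop" callback, it behaves roughly like this sketch (not part of the patch itself):

    int (*__routine)(struct device *__d);

    __routine = genpd->dev_ops.stop;            /* domain-wide callback first */
    if (__routine)
            return __routine(dev);
    __routine = dev_gpd_data(dev)->ops.stop;    /* then the per-device one */
    return __routine ? __routine(dev) : 0;

GENPD_DEV_TIMED_CALLBACK() additionally times the call with ktime_get() and records a new worst-case latency in the device's gpd_timing_data, warning whenever the stored bound grows.
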
@@ -29,6 +60,31 @@ static struct generic_pm_domain *dev_to_genpd(struct device *dev)
 	return pd_to_genpd(dev->pm_domain);
 }
 
+static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
+					stop_latency_ns, "stop");
+}
+
+static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
+					start_latency_ns, "start");
+}
+
+static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
+					save_state_latency_ns, "state save");
+}
+
+static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
+					restore_state_latency_ns,
+					"state restore");
+}
+
 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
 {
 	bool ret = false;
@@ -145,9 +201,21 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
 	}
 
 	if (genpd->power_on) {
+		ktime_t time_start = ktime_get();
+		s64 elapsed_ns;
+
 		ret = genpd->power_on(genpd);
 		if (ret)
 			goto err;
+
+		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+		if (elapsed_ns > genpd->power_on_latency_ns) {
+			genpd->power_on_latency_ns = elapsed_ns;
+			if (genpd->name)
+				pr_warning("%s: Power-on latency exceeded, "
+					"new value %lld ns\n", genpd->name,
+					elapsed_ns);
+		}
 	}
 
 	genpd_set_active(genpd);
@@ -190,7 +258,6 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd,
 {
 	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
 	struct device *dev = pdd->dev;
-	struct device_driver *drv = dev->driver;
 	int ret = 0;
 
 	if (gpd_data->need_restore)
@@ -198,15 +265,9 @@
 
 	mutex_unlock(&genpd->lock);
 
-	if (drv && drv->pm && drv->pm->runtime_suspend) {
-		if (genpd->start_device)
-			genpd->start_device(dev);
-
-		ret = drv->pm->runtime_suspend(dev);
-
-		if (genpd->stop_device)
-			genpd->stop_device(dev);
-	}
+	genpd_start_dev(genpd, dev);
+	ret = genpd_save_dev(genpd, dev);
+	genpd_stop_dev(genpd, dev);
 
 	mutex_lock(&genpd->lock);
 
@@ -227,22 +288,15 @@ static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
 {
 	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
 	struct device *dev = pdd->dev;
-	struct device_driver *drv = dev->driver;
 
 	if (!gpd_data->need_restore)
 		return;
 
 	mutex_unlock(&genpd->lock);
 
-	if (drv && drv->pm && drv->pm->runtime_resume) {
-		if (genpd->start_device)
-			genpd->start_device(dev);
-
-		drv->pm->runtime_resume(dev);
-
-		if (genpd->stop_device)
-			genpd->stop_device(dev);
-	}
+	genpd_start_dev(genpd, dev);
+	genpd_restore_dev(genpd, dev);
+	genpd_stop_dev(genpd, dev);
 
 	mutex_lock(&genpd->lock);
 
@@ -354,11 +408,16 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 	}
 
 	if (genpd->power_off) {
+		ktime_t time_start;
+		s64 elapsed_ns;
+
 		if (atomic_read(&genpd->sd_count) > 0) {
 			ret = -EBUSY;
 			goto out;
 		}
 
+		time_start = ktime_get();
+
 		/*
 		 * If sd_count > 0 at this point, one of the subdomains hasn't
 		 * managed to call pm_genpd_poweron() for the master yet after
@@ -372,9 +431,29 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 			genpd_set_active(genpd);
 			goto out;
 		}
+
+		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+		if (elapsed_ns > genpd->power_off_latency_ns) {
+			genpd->power_off_latency_ns = elapsed_ns;
+			if (genpd->name)
+				pr_warning("%s: Power-off latency exceeded, "
+					"new value %lld ns\n", genpd->name,
+					elapsed_ns);
+		}
 	}
 
 	genpd->status = GPD_STATE_POWER_OFF;
+	genpd->power_off_time = ktime_get();
+
+	/* Update PM QoS information for devices in the domain. */
+	list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
+		struct gpd_timing_data *td = &to_gpd_data(pdd)->td;
+
+		pm_runtime_update_max_time_suspended(pdd->dev,
+					td->start_latency_ns +
+					td->restore_state_latency_ns +
+					genpd->power_on_latency_ns);
+	}
 
 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
 		genpd_sd_counter_dec(link->master);
@@ -413,6 +492,8 @@ static void genpd_power_off_work_fn(struct work_struct *work)
 static int pm_genpd_runtime_suspend(struct device *dev)
 {
 	struct generic_pm_domain *genpd;
+	bool (*stop_ok)(struct device *__dev);
+	int ret;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
@@ -422,11 +503,16 @@ static int pm_genpd_runtime_suspend(struct device *dev)
 
 	might_sleep_if(!genpd->dev_irq_safe);
 
-	if (genpd->stop_device) {
-		int ret = genpd->stop_device(dev);
-		if (ret)
-			return ret;
-	}
+	stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
+	if (stop_ok && !stop_ok(dev))
+		return -EBUSY;
+
+	ret = genpd_stop_dev(genpd, dev);
+	if (ret)
+		return ret;
+
+	pm_runtime_update_max_time_suspended(dev,
+				dev_gpd_data(dev)->td.start_latency_ns);
 
 	/*
 	 * If power.irq_safe is set, this routine will be run with interrupts
@@ -502,8 +588,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
 	mutex_unlock(&genpd->lock);
 
  out:
-	if (genpd->start_device)
-		genpd->start_device(dev);
+	genpd_start_dev(genpd, dev);
 
 	return 0;
 }
@@ -534,6 +619,52 @@ static inline void genpd_power_off_work_fn(struct work_struct *work) {}
 
 #ifdef CONFIG_PM_SLEEP
 
+static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
+				    struct device *dev)
+{
+	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
+}
+
+static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_CALLBACK(genpd, int, suspend, dev);
+}
+
+static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev);
+}
+
+static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev);
+}
+
+static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_CALLBACK(genpd, int, resume, dev);
+}
+
+static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_CALLBACK(genpd, int, freeze, dev);
+}
+
+static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev);
+}
+
+static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev);
+}
+
+static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_CALLBACK(genpd, int, thaw, dev);
+}
+
 /**
  * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
  * @genpd: PM domain to power off, if possible.
@@ -590,7 +721,7 @@ static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
 	if (!device_can_wakeup(dev))
 		return false;
 
-	active_wakeup = genpd->active_wakeup && genpd->active_wakeup(dev);
+	active_wakeup = genpd_dev_active_wakeup(genpd, dev);
 	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
 }
 
@@ -646,7 +777,7 @@ static int pm_genpd_prepare(struct device *dev)
 	/*
 	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
 	 * so pm_genpd_poweron() will return immediately, but if the device
-	 * is suspended (e.g. it's been stopped by .stop_device()), we need
+	 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
 	 * to make it operational.
 	 */
 	pm_runtime_resume(dev);
@@ -685,7 +816,7 @@ static int pm_genpd_suspend(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
+	return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev);
 }
 
 /**
@@ -710,16 +841,14 @@ static int pm_genpd_suspend_noirq(struct device *dev)
 	if (genpd->suspend_power_off)
 		return 0;
 
-	ret = pm_generic_suspend_noirq(dev);
+	ret = genpd_suspend_late(genpd, dev);
 	if (ret)
 		return ret;
 
-	if (dev->power.wakeup_path
-	    && genpd->active_wakeup && genpd->active_wakeup(dev))
+	if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
 		return 0;
 
-	if (genpd->stop_device)
-		genpd->stop_device(dev);
+	genpd_stop_dev(genpd, dev);
 
 	/*
 	 * Since all of the "noirq" callbacks are executed sequentially, it is
@@ -761,10 +890,9 @@ static int pm_genpd_resume_noirq(struct device *dev)
 	 */
 	pm_genpd_poweron(genpd);
 	genpd->suspended_count--;
-	if (genpd->start_device)
-		genpd->start_device(dev);
+	genpd_start_dev(genpd, dev);
 
-	return pm_generic_resume_noirq(dev);
+	return genpd_resume_early(genpd, dev);
 }
 
 /**
@@ -785,7 +913,7 @@ static int pm_genpd_resume(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
+	return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev);
 }
 
 /**
@@ -806,7 +934,7 @@ static int pm_genpd_freeze(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
+	return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev);
 }
 
 /**
@@ -832,12 +960,11 @@ static int pm_genpd_freeze_noirq(struct device *dev)
 	if (genpd->suspend_power_off)
 		return 0;
 
-	ret = pm_generic_freeze_noirq(dev);
+	ret = genpd_freeze_late(genpd, dev);
 	if (ret)
 		return ret;
 
-	if (genpd->stop_device)
-		genpd->stop_device(dev);
+	genpd_stop_dev(genpd, dev);
 
 	return 0;
 }
@@ -864,10 +991,9 @@ static int pm_genpd_thaw_noirq(struct device *dev)
 	if (genpd->suspend_power_off)
 		return 0;
 
-	if (genpd->start_device)
-		genpd->start_device(dev);
+	genpd_start_dev(genpd, dev);
 
-	return pm_generic_thaw_noirq(dev);
+	return genpd_thaw_early(genpd, dev);
 }
 
 /**
@@ -888,72 +1014,7 @@ static int pm_genpd_thaw(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
-}
-
-/**
- * pm_genpd_dev_poweroff - Power off a device belonging to an I/O PM domain.
- * @dev: Device to suspend.
- *
- * Power off a device under the assumption that its pm_domain field points to
- * the domain member of an object of type struct generic_pm_domain representing
- * a PM domain consisting of I/O devices.
- */
-static int pm_genpd_dev_poweroff(struct device *dev)
-{
-	struct generic_pm_domain *genpd;
-
-	dev_dbg(dev, "%s()\n", __func__);
-
-	genpd = dev_to_genpd(dev);
-	if (IS_ERR(genpd))
-		return -EINVAL;
-
-	return genpd->suspend_power_off ? 0 : pm_generic_poweroff(dev);
-}
-
-/**
- * pm_genpd_dev_poweroff_noirq - Late power off of a device from a PM domain.
- * @dev: Device to suspend.
- *
- * Carry out a late powering off of a device under the assumption that its
- * pm_domain field points to the domain member of an object of type
- * struct generic_pm_domain representing a PM domain consisting of I/O devices.
- */
-static int pm_genpd_dev_poweroff_noirq(struct device *dev)
-{
-	struct generic_pm_domain *genpd;
-	int ret;
-
-	dev_dbg(dev, "%s()\n", __func__);
-
-	genpd = dev_to_genpd(dev);
-	if (IS_ERR(genpd))
-		return -EINVAL;
-
-	if (genpd->suspend_power_off)
-		return 0;
-
-	ret = pm_generic_poweroff_noirq(dev);
-	if (ret)
-		return ret;
-
-	if (dev->power.wakeup_path
-	    && genpd->active_wakeup && genpd->active_wakeup(dev))
-		return 0;
-
-	if (genpd->stop_device)
-		genpd->stop_device(dev);
-
-	/*
-	 * Since all of the "noirq" callbacks are executed sequentially, it is
-	 * guaranteed that this function will never run twice in parallel for
-	 * the same PM domain, so it is not necessary to use locking here.
-	 */
-	genpd->suspended_count++;
-	pm_genpd_sync_poweroff(genpd);
-
-	return 0;
+	return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev);
 }
 
 /**
@@ -993,31 +1054,9 @@ static int pm_genpd_restore_noirq(struct device *dev)
 
 	pm_genpd_poweron(genpd);
 	genpd->suspended_count--;
-	if (genpd->start_device)
-		genpd->start_device(dev);
-
-	return pm_generic_restore_noirq(dev);
-}
-
-/**
- * pm_genpd_restore - Restore a device belonging to an I/O power domain.
- * @dev: Device to resume.
- *
- * Restore a device under the assumption that its pm_domain field points to the
- * domain member of an object of type struct generic_pm_domain representing
- * a power domain consisting of I/O devices.
- */
-static int pm_genpd_restore(struct device *dev)
-{
-	struct generic_pm_domain *genpd;
-
-	dev_dbg(dev, "%s()\n", __func__);
-
-	genpd = dev_to_genpd(dev);
-	if (IS_ERR(genpd))
-		return -EINVAL;
+	genpd_start_dev(genpd, dev);
 
-	return genpd->suspend_power_off ? 0 : pm_generic_restore(dev);
+	return genpd_resume_early(genpd, dev);
 }
 
 /**
@@ -1067,20 +1106,19 @@ static void pm_genpd_complete(struct device *dev)
 #define pm_genpd_freeze_noirq	NULL
 #define pm_genpd_thaw_noirq	NULL
 #define pm_genpd_thaw		NULL
-#define pm_genpd_dev_poweroff_noirq	NULL
-#define pm_genpd_dev_poweroff		NULL
 #define pm_genpd_restore_noirq	NULL
-#define pm_genpd_restore	NULL
 #define pm_genpd_complete	NULL
 
 #endif /* CONFIG_PM_SLEEP */
 
 /**
- * pm_genpd_add_device - Add a device to an I/O PM domain.
+ * __pm_genpd_add_device - Add a device to an I/O PM domain.
  * @genpd: PM domain to add the device to.
  * @dev: Device to be added.
+ * @td: Set of PM QoS timing parameters to attach to the device.
  */
-int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
+int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
+			  struct gpd_timing_data *td)
 {
 	struct generic_pm_domain_data *gpd_data;
 	struct pm_domain_data *pdd;
@@ -1123,6 +1161,8 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
 	gpd_data->base.dev = dev;
 	gpd_data->need_restore = false;
 	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
+	if (td)
+		gpd_data->td = *td;
 
  out:
 	genpd_release_lock(genpd);
@@ -1280,6 +1320,204 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 }
 
 /**
+ * pm_genpd_add_callbacks - Add PM domain callbacks to a given device.
+ * @dev: Device to add the callbacks to.
+ * @ops: Set of callbacks to add.
+ * @td: Timing data to add to the device along with the callbacks (optional).
+ */
+int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
+			   struct gpd_timing_data *td)
+{
+	struct pm_domain_data *pdd;
+	int ret = 0;
+
+	if (!(dev && dev->power.subsys_data && ops))
+		return -EINVAL;
+
+	pm_runtime_disable(dev);
+	device_pm_lock();
+
+	pdd = dev->power.subsys_data->domain_data;
+	if (pdd) {
+		struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+
+		gpd_data->ops = *ops;
+		if (td)
+			gpd_data->td = *td;
+	} else {
+		ret = -EINVAL;
+	}
+
+	device_pm_unlock();
+	pm_runtime_enable(dev);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
+
+/**
+ * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
+ * @dev: Device to remove the callbacks from.
+ * @clear_td: If set, clear the device's timing data too.
+ */
+int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
+{
+	struct pm_domain_data *pdd;
+	int ret = 0;
+
+	if (!(dev && dev->power.subsys_data))
+		return -EINVAL;
+
+	pm_runtime_disable(dev);
+	device_pm_lock();
+
+	pdd = dev->power.subsys_data->domain_data;
+	if (pdd) {
+		struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+
+		gpd_data->ops = (struct gpd_dev_ops){ 0 };
+		if (clear_td)
+			gpd_data->td = (struct gpd_timing_data){ 0 };
+	} else {
+		ret = -EINVAL;
+	}
+
+	device_pm_unlock();
+	pm_runtime_enable(dev);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
+
+/* Default device callbacks for generic PM domains. */
+
+/**
+ * pm_genpd_default_save_state - Default "save device state" for PM domians.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_save_state(struct device *dev)
+{
+	int (*cb)(struct device *__dev);
+	struct device_driver *drv = dev->driver;
+
+	cb = dev_gpd_data(dev)->ops.save_state;
+	if (cb)
+		return cb(dev);
+
+	if (drv && drv->pm && drv->pm->runtime_suspend)
+		return drv->pm->runtime_suspend(dev);
+
+	return 0;
+}
+
+/**
+ * pm_genpd_default_restore_state - Default PM domians "restore device state".
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_restore_state(struct device *dev)
+{
+	int (*cb)(struct device *__dev);
+	struct device_driver *drv = dev->driver;
+
+	cb = dev_gpd_data(dev)->ops.restore_state;
+	if (cb)
+		return cb(dev);
+
+	if (drv && drv->pm && drv->pm->runtime_resume)
+		return drv->pm->runtime_resume(dev);
+
+	return 0;
+}
+
+/**
+ * pm_genpd_default_suspend - Default "device suspend" for PM domians.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_suspend(struct device *dev)
+{
+	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend;
+
+	return cb ? cb(dev) : pm_generic_suspend(dev);
+}
+
+/**
+ * pm_genpd_default_suspend_late - Default "late device suspend" for PM domians.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_suspend_late(struct device *dev)
+{
+	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;
+
+	return cb ? cb(dev) : pm_generic_suspend_noirq(dev);
+}
+
+/**
+ * pm_genpd_default_resume_early - Default "early device resume" for PM domians.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_resume_early(struct device *dev)
+{
+	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;
+
+	return cb ? cb(dev) : pm_generic_resume_noirq(dev);
+}
+
+/**
+ * pm_genpd_default_resume - Default "device resume" for PM domians.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_resume(struct device *dev)
+{
+	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume;
+
+	return cb ? cb(dev) : pm_generic_resume(dev);
+}
+
+/**
+ * pm_genpd_default_freeze - Default "device freeze" for PM domians.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_freeze(struct device *dev)
+{
+	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;
+
+	return cb ? cb(dev) : pm_generic_freeze(dev);
+}
+
+/**
+ * pm_genpd_default_freeze_late - Default "late device freeze" for PM domians.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_freeze_late(struct device *dev)
+{
+	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
+
+	return cb ? cb(dev) : pm_generic_freeze_noirq(dev);
+}
+
+/**
+ * pm_genpd_default_thaw_early - Default "early device thaw" for PM domians.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_thaw_early(struct device *dev)
+{
+	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
+
+	return cb ? cb(dev) : pm_generic_thaw_noirq(dev);
+}
+
+/**
+ * pm_genpd_default_thaw - Default "device thaw" for PM domians.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_thaw(struct device *dev)
+{
+	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;
+
+	return cb ? cb(dev) : pm_generic_thaw(dev);
+}
+
+/**
  * pm_genpd_init - Initialize a generic I/O PM domain object.
  * @genpd: PM domain object to initialize.
  * @gov: PM domain governor to associate with the domain (may be NULL).
@@ -1305,6 +1543,7 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
 	genpd->resume_count = 0;
 	genpd->device_count = 0;
 	genpd->suspended_count = 0;
+	genpd->max_off_time_ns = -1;
 	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
 	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
 	genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
@@ -1317,11 +1556,21 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
 	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
 	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
 	genpd->domain.ops.thaw = pm_genpd_thaw;
-	genpd->domain.ops.poweroff = pm_genpd_dev_poweroff;
-	genpd->domain.ops.poweroff_noirq = pm_genpd_dev_poweroff_noirq;
+	genpd->domain.ops.poweroff = pm_genpd_suspend;
+	genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
 	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
-	genpd->domain.ops.restore = pm_genpd_restore;
+	genpd->domain.ops.restore = pm_genpd_resume;
 	genpd->domain.ops.complete = pm_genpd_complete;
+	genpd->dev_ops.save_state = pm_genpd_default_save_state;
+	genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
+	genpd->dev_ops.suspend = pm_genpd_default_suspend;
+	genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late;
+	genpd->dev_ops.resume_early = pm_genpd_default_resume_early;
+	genpd->dev_ops.resume = pm_genpd_default_resume;
+	genpd->dev_ops.freeze = pm_genpd_default_freeze;
+	genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late;
+	genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early;
+	genpd->dev_ops.thaw = pm_genpd_default_thaw;
 	mutex_lock(&gpd_list_lock);
 	list_add(&genpd->gpd_list_node, &gpd_list);
 	mutex_unlock(&gpd_list_lock);
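
Taken together, a platform could now register a domain and attach a device with timing data along these lines; my_genpd, my_power_on, my_power_off and the latency values are illustrative stand-ins, not taken from this patch (simple_qos_governor comes from the new domain_governor.c below):

    static int my_power_on(struct generic_pm_domain *genpd)    /* hypothetical */
    {
            return 0;       /* e.g. switch the power rail on here */
    }

    static int my_power_off(struct generic_pm_domain *genpd)   /* hypothetical */
    {
            return 0;       /* e.g. switch the power rail off here */
    }

    static struct generic_pm_domain my_genpd = {
            .name = "my_genpd",
            .power_on = my_power_on,
            .power_off = my_power_off,
    };

    static struct gpd_timing_data my_td = {
            .stop_latency_ns = 20000,       /* illustrative numbers */
            .start_latency_ns = 50000,
            .break_even_ns = 100000,
    };

    pm_genpd_init(&my_genpd, &simple_qos_governor, false);
    __pm_genpd_add_device(&my_genpd, dev, &my_td);      /* dev: the device in scope */
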
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
new file mode 100644
index 000000000000..51527ee92d10
--- /dev/null
+++ b/drivers/base/power/domain_governor.c
@@ -0,0 +1,156 @@
+/*
+ * drivers/base/power/domain_governor.c - Governors for device PM domains.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_qos.h>
+#include <linux/hrtimer.h>
+
+/**
+ * default_stop_ok - Default PM domain governor routine for stopping devices.
+ * @dev: Device to check.
+ */
+bool default_stop_ok(struct device *dev)
+{
+	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	if (dev->power.max_time_suspended_ns < 0 || td->break_even_ns == 0)
+		return true;
+
+	return td->stop_latency_ns + td->start_latency_ns < td->break_even_ns
+		&& td->break_even_ns < dev->power.max_time_suspended_ns;
+}
+
+/**
+ * default_power_down_ok - Default generic PM domain power off governor routine.
+ * @pd: PM domain to check.
+ *
+ * This routine must be executed under the PM domain's lock.
+ */
+static bool default_power_down_ok(struct dev_pm_domain *pd)
+{
+	struct generic_pm_domain *genpd = pd_to_genpd(pd);
+	struct gpd_link *link;
+	struct pm_domain_data *pdd;
+	s64 min_dev_off_time_ns;
+	s64 off_on_time_ns;
+	ktime_t time_now = ktime_get();
+
+	off_on_time_ns = genpd->power_off_latency_ns +
+		genpd->power_on_latency_ns;
+	/*
+	 * It doesn't make sense to remove power from the domain if saving
+	 * the state of all devices in it and the power off/power on operations
+	 * take too much time.
+	 *
+	 * All devices in this domain have been stopped already at this point.
+	 */
+	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+		if (pdd->dev->driver)
+			off_on_time_ns +=
+				to_gpd_data(pdd)->td.save_state_latency_ns;
+	}
+
+	/*
+	 * Check if subdomains can be off for enough time.
+	 *
+	 * All subdomains have been powered off already at this point.
+	 */
+	list_for_each_entry(link, &genpd->master_links, master_node) {
+		struct generic_pm_domain *sd = link->slave;
+		s64 sd_max_off_ns = sd->max_off_time_ns;
+
+		if (sd_max_off_ns < 0)
+			continue;
+
+		sd_max_off_ns -= ktime_to_ns(ktime_sub(time_now,
+						       sd->power_off_time));
+		/*
+		 * Check if the subdomain is allowed to be off long enough for
+		 * the current domain to turn off and on (that's how much time
+		 * it will have to wait worst case).
+		 */
+		if (sd_max_off_ns <= off_on_time_ns)
+			return false;
+	}
+
+	/*
+	 * Check if the devices in the domain can be off enough time.
+	 */
+	min_dev_off_time_ns = -1;
+	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+		struct gpd_timing_data *td;
+		struct device *dev = pdd->dev;
+		s64 dev_off_time_ns;
+
+		if (!dev->driver || dev->power.max_time_suspended_ns < 0)
+			continue;
+
+		td = &to_gpd_data(pdd)->td;
+		dev_off_time_ns = dev->power.max_time_suspended_ns -
+			(td->start_latency_ns + td->restore_state_latency_ns +
+				ktime_to_ns(ktime_sub(time_now,
+						dev->power.suspend_time)));
+		if (dev_off_time_ns <= off_on_time_ns)
+			return false;
+
+		if (min_dev_off_time_ns > dev_off_time_ns
+		    || min_dev_off_time_ns < 0)
+			min_dev_off_time_ns = dev_off_time_ns;
+	}
+
+	if (min_dev_off_time_ns < 0) {
+		/*
+		 * There are no latency constraints, so the domain can spend
+		 * arbitrary time in the "off" state.
+		 */
+		genpd->max_off_time_ns = -1;
+		return true;
+	}
+
+	/*
+	 * The difference between the computed minimum delta and the time needed
+	 * to turn the domain on is the maximum theoretical time this domain can
+	 * spend in the "off" state.
+	 */
+	min_dev_off_time_ns -= genpd->power_on_latency_ns;
+
+	/*
+	 * If the difference between the computed minimum delta and the time
+	 * needed to turn the domain off and back on on is smaller than the
+	 * domain's power break even time, removing power from the domain is not
+	 * worth it.
+	 */
+	if (genpd->break_even_ns >
+	    min_dev_off_time_ns - genpd->power_off_latency_ns)
+		return false;
+
+	genpd->max_off_time_ns = min_dev_off_time_ns;
+	return true;
+}
+
+struct dev_power_governor simple_qos_governor = {
+	.stop_ok = default_stop_ok,
+	.power_down_ok = default_power_down_ok,
+};
+
+static bool always_on_power_down_ok(struct dev_pm_domain *domain)
+{
+	return false;
+}
+
+/**
+ * pm_genpd_gov_always_on - A governor implementing an always-on policy
+ */
+struct dev_power_governor pm_domain_always_on_gov = {
+	.power_down_ok = always_on_power_down_ok,
+	.stop_ok = default_stop_ok,
+};
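
Reading default_stop_ok() above with illustrative numbers: for a device with stop_latency_ns = 20000, start_latency_ns = 50000 and break_even_ns = 100000, stopping is permitted while max_time_suspended_ns exceeds 100000, since 20000 + 50000 < 100000 and 100000 < max_time_suspended_ns. Once a PM QoS constraint pushes max_time_suspended_ns at or below the break-even time, the governor returns false and pm_genpd_runtime_suspend() in domain.c above bails out with -EBUSY instead of stopping the device.
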
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 265a0ee3b49e..10bdd793f0bd 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -97,16 +97,16 @@ int pm_generic_prepare(struct device *dev)
  * @event: PM transition of the system under way.
  * @bool: Whether or not this is the "noirq" stage.
  *
- * If the device has not been suspended at run time, execute the
- * suspend/freeze/poweroff/thaw callback provided by its driver, if defined, and
- * return its error code. Otherwise, return zero.
+ * Execute the PM callback corresponding to @event provided by the driver of
+ * @dev, if defined, and return its error code. Return 0 if the callback is
+ * not present.
  */
 static int __pm_generic_call(struct device *dev, int event, bool noirq)
 {
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 	int (*callback)(struct device *);
 
-	if (!pm || pm_runtime_suspended(dev))
+	if (!pm)
 		return 0;
 
 	switch (event) {
@@ -119,9 +119,15 @@ static int __pm_generic_call(struct device *dev, int event, bool noirq)
 	case PM_EVENT_HIBERNATE:
 		callback = noirq ? pm->poweroff_noirq : pm->poweroff;
 		break;
+	case PM_EVENT_RESUME:
+		callback = noirq ? pm->resume_noirq : pm->resume;
+		break;
 	case PM_EVENT_THAW:
 		callback = noirq ? pm->thaw_noirq : pm->thaw;
 		break;
+	case PM_EVENT_RESTORE:
+		callback = noirq ? pm->restore_noirq : pm->restore;
+		break;
 	default:
 		callback = NULL;
 		break;
@@ -211,56 +217,12 @@ int pm_generic_thaw(struct device *dev)
 EXPORT_SYMBOL_GPL(pm_generic_thaw);
 
 /**
- * __pm_generic_resume - Generic resume/restore callback for subsystems.
- * @dev: Device to handle.
- * @event: PM transition of the system under way.
- * @bool: Whether or not this is the "noirq" stage.
- *
- * Execute the resume/resotre callback provided by the @dev's driver, if
- * defined. If it returns 0, change the device's runtime PM status to 'active'.
- * Return the callback's error code.
- */
-static int __pm_generic_resume(struct device *dev, int event, bool noirq)
-{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-	int (*callback)(struct device *);
-	int ret;
-
-	if (!pm)
-		return 0;
-
-	switch (event) {
-	case PM_EVENT_RESUME:
-		callback = noirq ? pm->resume_noirq : pm->resume;
-		break;
-	case PM_EVENT_RESTORE:
-		callback = noirq ? pm->restore_noirq : pm->restore;
-		break;
-	default:
-		callback = NULL;
-		break;
-	}
-
-	if (!callback)
-		return 0;
-
-	ret = callback(dev);
-	if (!ret && !noirq && pm_runtime_enabled(dev)) {
-		pm_runtime_disable(dev);
-		pm_runtime_set_active(dev);
-		pm_runtime_enable(dev);
-	}
-
-	return ret;
-}
-
-/**
  * pm_generic_resume_noirq - Generic resume_noirq callback for subsystems.
  * @dev: Device to resume.
  */
 int pm_generic_resume_noirq(struct device *dev)
 {
-	return __pm_generic_resume(dev, PM_EVENT_RESUME, true);
+	return __pm_generic_call(dev, PM_EVENT_RESUME, true);
 }
 EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
 
@@ -270,7 +232,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
  */
 int pm_generic_resume(struct device *dev)
 {
-	return __pm_generic_resume(dev, PM_EVENT_RESUME, false);
+	return __pm_generic_call(dev, PM_EVENT_RESUME, false);
 }
 EXPORT_SYMBOL_GPL(pm_generic_resume);
 
@@ -280,7 +242,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume);
  */
 int pm_generic_restore_noirq(struct device *dev)
 {
-	return __pm_generic_resume(dev, PM_EVENT_RESTORE, true);
+	return __pm_generic_call(dev, PM_EVENT_RESTORE, true);
 }
 EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
 
@@ -290,7 +252,7 @@ EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
  */
 int pm_generic_restore(struct device *dev)
 {
-	return __pm_generic_resume(dev, PM_EVENT_RESTORE, false);
+	return __pm_generic_call(dev, PM_EVENT_RESTORE, false);
 }
 EXPORT_SYMBOL_GPL(pm_generic_restore);
 
@@ -314,28 +276,3 @@ void pm_generic_complete(struct device *dev)
 	pm_runtime_idle(dev);
 }
 #endif /* CONFIG_PM_SLEEP */
-
-struct dev_pm_ops generic_subsys_pm_ops = {
-#ifdef CONFIG_PM_SLEEP
-	.prepare = pm_generic_prepare,
-	.suspend = pm_generic_suspend,
-	.suspend_noirq = pm_generic_suspend_noirq,
-	.resume = pm_generic_resume,
-	.resume_noirq = pm_generic_resume_noirq,
-	.freeze = pm_generic_freeze,
-	.freeze_noirq = pm_generic_freeze_noirq,
-	.thaw = pm_generic_thaw,
-	.thaw_noirq = pm_generic_thaw_noirq,
-	.poweroff = pm_generic_poweroff,
-	.poweroff_noirq = pm_generic_poweroff_noirq,
-	.restore = pm_generic_restore,
-	.restore_noirq = pm_generic_restore_noirq,
-	.complete = pm_generic_complete,
-#endif
-#ifdef CONFIG_PM_RUNTIME
-	.runtime_suspend = pm_generic_runtime_suspend,
-	.runtime_resume = pm_generic_runtime_resume,
-	.runtime_idle = pm_generic_runtime_idle,
-#endif
-};
-EXPORT_SYMBOL_GPL(generic_subsys_pm_ops);
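
With generic_subsys_pm_ops removed, a subsystem that previously pointed its .pm at that catch-all table would presumably spell out the operations it actually needs instead; a minimal sketch using the helpers this file still exports, with my_bus entirely hypothetical:

    static const struct dev_pm_ops my_bus_pm_ops = {
            .suspend = pm_generic_suspend,
            .resume = pm_generic_resume,
            .runtime_suspend = pm_generic_runtime_suspend,
            .runtime_resume = pm_generic_runtime_resume,
    };

    struct bus_type my_bus = {
            .name = "my_bus",
            .pm = &my_bus_pm_ops,
    };
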
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index c3d2dfcf438d..e2cc3d2e0ecc 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -32,6 +32,8 @@
 #include "../base.h"
 #include "power.h"
 
+typedef int (*pm_callback_t)(struct device *);
+
 /*
  * The entries in the dpm_list list are in a depth first order, simply
  * because children are guaranteed to be discovered after parents, and
@@ -164,8 +166,9 @@ static ktime_t initcall_debug_start(struct device *dev)
 	ktime_t calltime = ktime_set(0, 0);
 
 	if (initcall_debug) {
-		pr_info("calling %s+ @ %i\n",
-			dev_name(dev), task_pid_nr(current));
+		pr_info("calling %s+ @ %i, parent: %s\n",
+			dev_name(dev), task_pid_nr(current),
+			dev->parent ? dev_name(dev->parent) : "none");
 		calltime = ktime_get();
 	}
 
@@ -211,151 +214,69 @@ static void dpm_wait_for_children(struct device *dev, bool async)
 }
 
 /**
- * pm_op - Execute the PM operation appropriate for given PM event.
- * @dev: Device to handle.
+ * pm_op - Return the PM operation appropriate for given PM event.
  * @ops: PM operations to choose from.
  * @state: PM transition of the system being carried out.
  */
-static int pm_op(struct device *dev,
-		 const struct dev_pm_ops *ops,
-		 pm_message_t state)
+static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
 {
-	int error = 0;
-	ktime_t calltime;
-
-	calltime = initcall_debug_start(dev);
-
 	switch (state.event) {
 #ifdef CONFIG_SUSPEND
 	case PM_EVENT_SUSPEND:
-		if (ops->suspend) {
-			error = ops->suspend(dev);
-			suspend_report_result(ops->suspend, error);
-		}
-		break;
+		return ops->suspend;
 	case PM_EVENT_RESUME:
-		if (ops->resume) {
-			error = ops->resume(dev);
-			suspend_report_result(ops->resume, error);
-		}
-		break;
+		return ops->resume;
 #endif /* CONFIG_SUSPEND */
 #ifdef CONFIG_HIBERNATE_CALLBACKS
 	case PM_EVENT_FREEZE:
 	case PM_EVENT_QUIESCE:
-		if (ops->freeze) {
-			error = ops->freeze(dev);
-			suspend_report_result(ops->freeze, error);
-		}
-		break;
+		return ops->freeze;
 	case PM_EVENT_HIBERNATE:
-		if (ops->poweroff) {
-			error = ops->poweroff(dev);
-			suspend_report_result(ops->poweroff, error);
-		}
-		break;
+		return ops->poweroff;
 	case PM_EVENT_THAW:
 	case PM_EVENT_RECOVER:
-		if (ops->thaw) {
-			error = ops->thaw(dev);
-			suspend_report_result(ops->thaw, error);
-		}
+		return ops->thaw;
 		break;
 	case PM_EVENT_RESTORE:
-		if (ops->restore) {
-			error = ops->restore(dev);
-			suspend_report_result(ops->restore, error);
-		}
-		break;
+		return ops->restore;
 #endif /* CONFIG_HIBERNATE_CALLBACKS */
-	default:
-		error = -EINVAL;
 	}
 
-	initcall_debug_report(dev, calltime, error);
-
-	return error;
+	return NULL;
 }
 
 /**
- * pm_noirq_op - Execute the PM operation appropriate for given PM event.
- * @dev: Device to handle.
+ * pm_noirq_op - Return the PM operation appropriate for given PM event.
  * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
-static int pm_noirq_op(struct device *dev,
-			const struct dev_pm_ops *ops,
-			pm_message_t state)
+static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
 {
-	int error = 0;
-	ktime_t calltime = ktime_set(0, 0), delta, rettime;
-
-	if (initcall_debug) {
-		pr_info("calling %s+ @ %i, parent: %s\n",
-			dev_name(dev), task_pid_nr(current),
-			dev->parent ? dev_name(dev->parent) : "none");
-		calltime = ktime_get();
-	}
-
 	switch (state.event) {
 #ifdef CONFIG_SUSPEND
 	case PM_EVENT_SUSPEND:
-		if (ops->suspend_noirq) {
-			error = ops->suspend_noirq(dev);
-			suspend_report_result(ops->suspend_noirq, error);
-		}
-		break;
+		return ops->suspend_noirq;
 	case PM_EVENT_RESUME:
-		if (ops->resume_noirq) {
-			error = ops->resume_noirq(dev);
-			suspend_report_result(ops->resume_noirq, error);
-		}
-		break;
+		return ops->resume_noirq;
 #endif /* CONFIG_SUSPEND */
 #ifdef CONFIG_HIBERNATE_CALLBACKS
 	case PM_EVENT_FREEZE:
 	case PM_EVENT_QUIESCE:
-		if (ops->freeze_noirq) {
-			error = ops->freeze_noirq(dev);
-			suspend_report_result(ops->freeze_noirq, error);
-		}
-		break;
+		return ops->freeze_noirq;
 	case PM_EVENT_HIBERNATE:
-		if (ops->poweroff_noirq) {
-			error = ops->poweroff_noirq(dev);
-			suspend_report_result(ops->poweroff_noirq, error);
-		}
-		break;
+		return ops->poweroff_noirq;
 	case PM_EVENT_THAW:
 	case PM_EVENT_RECOVER:
-		if (ops->thaw_noirq) {
-			error = ops->thaw_noirq(dev);
-			suspend_report_result(ops->thaw_noirq, error);
-		}
-		break;
+		return ops->thaw_noirq;
 	case PM_EVENT_RESTORE:
-		if (ops->restore_noirq) {
-			error = ops->restore_noirq(dev);
-			suspend_report_result(ops->restore_noirq, error);
-		}
-		break;
+		return ops->restore_noirq;
 #endif /* CONFIG_HIBERNATE_CALLBACKS */
-	default:
-		error = -EINVAL;
-	}
-
-	if (initcall_debug) {
-		rettime = ktime_get();
-		delta = ktime_sub(rettime, calltime);
-		printk("initcall %s_i+ returned %d after %Ld usecs\n",
-			dev_name(dev), error,
-			(unsigned long long)ktime_to_ns(delta) >> 10);
 	}
 
-	return error;
+	return NULL;
 }
 
 static char *pm_verb(int event)
@@ -413,6 +334,26 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
 	usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
 }
 
+static int dpm_run_callback(pm_callback_t cb, struct device *dev,
+			    pm_message_t state, char *info)
+{
+	ktime_t calltime;
+	int error;
+
+	if (!cb)
+		return 0;
+
+	calltime = initcall_debug_start(dev);
+
+	pm_dev_dbg(dev, state, info);
+	error = cb(dev);
+	suspend_report_result(cb, error);
+
+	initcall_debug_report(dev, calltime, error);
+
+	return error;
+}
+
 /*------------------------- Resume routines -------------------------*/
 
 /**
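
The shape of this refactoring is to separate *choosing* a callback (pm_op()/pm_noirq_op(), now pure lookups) from *running* it (dpm_run_callback(), which carries the initcall debug timing and suspend_report_result() bookkeeping in one place). The callers below all follow the same pattern; schematically, under the patch's own names:

    pm_callback_t callback = NULL;
    char *info = NULL;

    if (dev->pm_domain) {                   /* highest-priority source */
            info = "EARLY power domain ";
            callback = pm_noirq_op(&dev->pm_domain->ops, state);
    }
    /* ... type, class and bus lookups elided ... */
    if (!callback && dev->driver && dev->driver->pm)
            callback = pm_noirq_op(dev->driver->pm, state);

    error = dpm_run_callback(callback, dev, state, info);
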
@@ -425,25 +366,34 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
425 */ 366 */
426static int device_resume_noirq(struct device *dev, pm_message_t state) 367static int device_resume_noirq(struct device *dev, pm_message_t state)
427{ 368{
369 pm_callback_t callback = NULL;
370 char *info = NULL;
428 int error = 0; 371 int error = 0;
429 372
430 TRACE_DEVICE(dev); 373 TRACE_DEVICE(dev);
431 TRACE_RESUME(0); 374 TRACE_RESUME(0);
432 375
433 if (dev->pm_domain) { 376 if (dev->pm_domain) {
434 pm_dev_dbg(dev, state, "EARLY power domain "); 377 info = "EARLY power domain ";
435 error = pm_noirq_op(dev, &dev->pm_domain->ops, state); 378 callback = pm_noirq_op(&dev->pm_domain->ops, state);
436 } else if (dev->type && dev->type->pm) { 379 } else if (dev->type && dev->type->pm) {
437 pm_dev_dbg(dev, state, "EARLY type "); 380 info = "EARLY type ";
438 error = pm_noirq_op(dev, dev->type->pm, state); 381 callback = pm_noirq_op(dev->type->pm, state);
439 } else if (dev->class && dev->class->pm) { 382 } else if (dev->class && dev->class->pm) {
440 pm_dev_dbg(dev, state, "EARLY class "); 383 info = "EARLY class ";
441 error = pm_noirq_op(dev, dev->class->pm, state); 384 callback = pm_noirq_op(dev->class->pm, state);
442 } else if (dev->bus && dev->bus->pm) { 385 } else if (dev->bus && dev->bus->pm) {
443 pm_dev_dbg(dev, state, "EARLY "); 386 info = "EARLY bus ";
444 error = pm_noirq_op(dev, dev->bus->pm, state); 387 callback = pm_noirq_op(dev->bus->pm, state);
445 } 388 }
446 389
390 if (!callback && dev->driver && dev->driver->pm) {
391 info = "EARLY driver ";
392 callback = pm_noirq_op(dev->driver->pm, state);
393 }
394
395 error = dpm_run_callback(callback, dev, state, info);
396
447 TRACE_RESUME(error); 397 TRACE_RESUME(error);
448 return error; 398 return error;
449} 399}
@@ -486,26 +436,6 @@ void dpm_resume_noirq(pm_message_t state)
486EXPORT_SYMBOL_GPL(dpm_resume_noirq); 436EXPORT_SYMBOL_GPL(dpm_resume_noirq);
487 437
488/** 438/**
489 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
490 * @dev: Device to resume.
491 * @cb: Resume callback to execute.
492 */
493static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
494{
495 int error;
496 ktime_t calltime;
497
498 calltime = initcall_debug_start(dev);
499
500 error = cb(dev);
501 suspend_report_result(cb, error);
502
503 initcall_debug_report(dev, calltime, error);
504
505 return error;
506}
507
508/**
509 * device_resume - Execute "resume" callbacks for given device. 439 * device_resume - Execute "resume" callbacks for given device.
510 * @dev: Device to handle. 440 * @dev: Device to handle.
511 * @state: PM transition of the system being carried out. 441 * @state: PM transition of the system being carried out.
@@ -513,6 +443,8 @@ static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
513 */ 443 */
514static int device_resume(struct device *dev, pm_message_t state, bool async) 444static int device_resume(struct device *dev, pm_message_t state, bool async)
515{ 445{
446 pm_callback_t callback = NULL;
447 char *info = NULL;
516 int error = 0; 448 int error = 0;
517 bool put = false; 449 bool put = false;
518 450
@@ -535,40 +467,48 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
535 put = true; 467 put = true;
536 468
537 if (dev->pm_domain) { 469 if (dev->pm_domain) {
538 pm_dev_dbg(dev, state, "power domain "); 470 info = "power domain ";
539 error = pm_op(dev, &dev->pm_domain->ops, state); 471 callback = pm_op(&dev->pm_domain->ops, state);
540 goto End; 472 goto Driver;
541 } 473 }
542 474
543 if (dev->type && dev->type->pm) { 475 if (dev->type && dev->type->pm) {
544 pm_dev_dbg(dev, state, "type "); 476 info = "type ";
545 error = pm_op(dev, dev->type->pm, state); 477 callback = pm_op(dev->type->pm, state);
546 goto End; 478 goto Driver;
547 } 479 }
548 480
549 if (dev->class) { 481 if (dev->class) {
550 if (dev->class->pm) { 482 if (dev->class->pm) {
551 pm_dev_dbg(dev, state, "class "); 483 info = "class ";
552 error = pm_op(dev, dev->class->pm, state); 484 callback = pm_op(dev->class->pm, state);
553 goto End; 485 goto Driver;
554 } else if (dev->class->resume) { 486 } else if (dev->class->resume) {
555 pm_dev_dbg(dev, state, "legacy class "); 487 info = "legacy class ";
556 error = legacy_resume(dev, dev->class->resume); 488 callback = dev->class->resume;
557 goto End; 489 goto End;
558 } 490 }
559 } 491 }
560 492
561 if (dev->bus) { 493 if (dev->bus) {
562 if (dev->bus->pm) { 494 if (dev->bus->pm) {
563 pm_dev_dbg(dev, state, ""); 495 info = "bus ";
564 error = pm_op(dev, dev->bus->pm, state); 496 callback = pm_op(dev->bus->pm, state);
565 } else if (dev->bus->resume) { 497 } else if (dev->bus->resume) {
566 pm_dev_dbg(dev, state, "legacy "); 498 info = "legacy bus ";
567 error = legacy_resume(dev, dev->bus->resume); 499 callback = dev->bus->resume;
500 goto End;
568 } 501 }
569 } 502 }
570 503
504 Driver:
505 if (!callback && dev->driver && dev->driver->pm) {
506 info = "driver ";
507 callback = pm_op(dev->driver->pm, state);
508 }
509
571 End: 510 End:
511 error = dpm_run_callback(callback, dev, state, info);
572 dev->power.is_suspended = false; 512 dev->power.is_suspended = false;
573 513
574 Unlock: 514 Unlock:
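In the reworked device_resume() the legacy class and bus ->resume pointers jump straight to End, presumably so that a pre-dev_pm_ops callback is never combined with the new driver fallback, while modern dev_pm_ops paths go through Driver first. A compilable skeleton of that two-label control flow, using hypothetical, simplified types:

struct device;
typedef int (*pm_callback_t)(struct device *);
struct pm_ops { pm_callback_t resume; };

struct device {
        const struct pm_ops *class_pm;          /* modern subsystem ops */
        pm_callback_t legacy_class_resume;      /* pre-dev_pm_ops hook */
        const struct pm_ops *driver_pm;
};

static int model_resume(struct device *dev)
{
        pm_callback_t callback = NULL;

        if (dev->class_pm) {
                callback = dev->class_pm->resume;
                goto Driver;    /* may be NULL: driver can fill in */
        }
        if (dev->legacy_class_resume) {
                callback = dev->legacy_class_resume;
                goto End;       /* legacy callback: no driver fallback */
        }

 Driver:
        if (!callback && dev->driver_pm)
                callback = dev->driver_pm->resume;

 End:
        return callback ? callback(dev) : 0;
}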
@@ -660,24 +600,33 @@ void dpm_resume(pm_message_t state)
660 */ 600 */
661static void device_complete(struct device *dev, pm_message_t state) 601static void device_complete(struct device *dev, pm_message_t state)
662{ 602{
603 void (*callback)(struct device *) = NULL;
604 char *info = NULL;
605
663 device_lock(dev); 606 device_lock(dev);
664 607
665 if (dev->pm_domain) { 608 if (dev->pm_domain) {
666 pm_dev_dbg(dev, state, "completing power domain "); 609 info = "completing power domain ";
667 if (dev->pm_domain->ops.complete) 610 callback = dev->pm_domain->ops.complete;
668 dev->pm_domain->ops.complete(dev);
669 } else if (dev->type && dev->type->pm) { 611 } else if (dev->type && dev->type->pm) {
670 pm_dev_dbg(dev, state, "completing type "); 612 info = "completing type ";
671 if (dev->type->pm->complete) 613 callback = dev->type->pm->complete;
672 dev->type->pm->complete(dev);
673 } else if (dev->class && dev->class->pm) { 614 } else if (dev->class && dev->class->pm) {
674 pm_dev_dbg(dev, state, "completing class "); 615 info = "completing class ";
675 if (dev->class->pm->complete) 616 callback = dev->class->pm->complete;
676 dev->class->pm->complete(dev);
677 } else if (dev->bus && dev->bus->pm) { 617 } else if (dev->bus && dev->bus->pm) {
678 pm_dev_dbg(dev, state, "completing "); 618 info = "completing bus ";
679 if (dev->bus->pm->complete) 619 callback = dev->bus->pm->complete;
680 dev->bus->pm->complete(dev); 620 }
621
622 if (!callback && dev->driver && dev->driver->pm) {
623 info = "completing driver ";
624 callback = dev->driver->pm->complete;
625 }
626
627 if (callback) {
628 pm_dev_dbg(dev, state, info);
629 callback(dev);
681 } 630 }
682 631
683 device_unlock(dev); 632 device_unlock(dev);
@@ -763,31 +712,29 @@ static pm_message_t resume_event(pm_message_t sleep_state)
763 */ 712 */
764static int device_suspend_noirq(struct device *dev, pm_message_t state) 713static int device_suspend_noirq(struct device *dev, pm_message_t state)
765{ 714{
766 int error; 715 pm_callback_t callback = NULL;
716 char *info = NULL;
767 717
768 if (dev->pm_domain) { 718 if (dev->pm_domain) {
769 pm_dev_dbg(dev, state, "LATE power domain "); 719 info = "LATE power domain ";
770 error = pm_noirq_op(dev, &dev->pm_domain->ops, state); 720 callback = pm_noirq_op(&dev->pm_domain->ops, state);
771 if (error)
772 return error;
773 } else if (dev->type && dev->type->pm) { 721 } else if (dev->type && dev->type->pm) {
774 pm_dev_dbg(dev, state, "LATE type "); 722 info = "LATE type ";
775 error = pm_noirq_op(dev, dev->type->pm, state); 723 callback = pm_noirq_op(dev->type->pm, state);
776 if (error)
777 return error;
778 } else if (dev->class && dev->class->pm) { 724 } else if (dev->class && dev->class->pm) {
779 pm_dev_dbg(dev, state, "LATE class "); 725 info = "LATE class ";
780 error = pm_noirq_op(dev, dev->class->pm, state); 726 callback = pm_noirq_op(dev->class->pm, state);
781 if (error)
782 return error;
783 } else if (dev->bus && dev->bus->pm) { 727 } else if (dev->bus && dev->bus->pm) {
784 pm_dev_dbg(dev, state, "LATE "); 728 info = "LATE bus ";
785 error = pm_noirq_op(dev, dev->bus->pm, state); 729 callback = pm_noirq_op(dev->bus->pm, state);
786 if (error)
787 return error;
788 } 730 }
789 731
790 return 0; 732 if (!callback && dev->driver && dev->driver->pm) {
733 info = "LATE driver ";
734 callback = pm_noirq_op(dev->driver->pm, state);
735 }
736
737 return dpm_run_callback(callback, dev, state, info);
791} 738}
792 739
793/** 740/**
@@ -864,6 +811,8 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
864 */ 811 */
865static int __device_suspend(struct device *dev, pm_message_t state, bool async) 812static int __device_suspend(struct device *dev, pm_message_t state, bool async)
866{ 813{
814 pm_callback_t callback = NULL;
815 char *info = NULL;
867 int error = 0; 816 int error = 0;
868 817
869 dpm_wait_for_children(dev, async); 818 dpm_wait_for_children(dev, async);
@@ -884,22 +833,22 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
884 device_lock(dev); 833 device_lock(dev);
885 834
886 if (dev->pm_domain) { 835 if (dev->pm_domain) {
887 pm_dev_dbg(dev, state, "power domain "); 836 info = "power domain ";
888 error = pm_op(dev, &dev->pm_domain->ops, state); 837 callback = pm_op(&dev->pm_domain->ops, state);
889 goto End; 838 goto Run;
890 } 839 }
891 840
892 if (dev->type && dev->type->pm) { 841 if (dev->type && dev->type->pm) {
893 pm_dev_dbg(dev, state, "type "); 842 info = "type ";
894 error = pm_op(dev, dev->type->pm, state); 843 callback = pm_op(dev->type->pm, state);
895 goto End; 844 goto Run;
896 } 845 }
897 846
898 if (dev->class) { 847 if (dev->class) {
899 if (dev->class->pm) { 848 if (dev->class->pm) {
900 pm_dev_dbg(dev, state, "class "); 849 info = "class ";
901 error = pm_op(dev, dev->class->pm, state); 850 callback = pm_op(dev->class->pm, state);
902 goto End; 851 goto Run;
903 } else if (dev->class->suspend) { 852 } else if (dev->class->suspend) {
904 pm_dev_dbg(dev, state, "legacy class "); 853 pm_dev_dbg(dev, state, "legacy class ");
905 error = legacy_suspend(dev, state, dev->class->suspend); 854 error = legacy_suspend(dev, state, dev->class->suspend);
@@ -909,14 +858,23 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
909 858
910 if (dev->bus) { 859 if (dev->bus) {
911 if (dev->bus->pm) { 860 if (dev->bus->pm) {
912 pm_dev_dbg(dev, state, ""); 861 info = "bus ";
913 error = pm_op(dev, dev->bus->pm, state); 862 callback = pm_op(dev->bus->pm, state);
914 } else if (dev->bus->suspend) { 863 } else if (dev->bus->suspend) {
915 pm_dev_dbg(dev, state, "legacy "); 864 pm_dev_dbg(dev, state, "legacy bus ");
916 error = legacy_suspend(dev, state, dev->bus->suspend); 865 error = legacy_suspend(dev, state, dev->bus->suspend);
866 goto End;
917 } 867 }
918 } 868 }
919 869
870 Run:
871 if (!callback && dev->driver && dev->driver->pm) {
872 info = "driver ";
873 callback = pm_op(dev->driver->pm, state);
874 }
875
876 error = dpm_run_callback(callback, dev, state, info);
877
920 End: 878 End:
921 if (!error) { 879 if (!error) {
922 dev->power.is_suspended = true; 880 dev->power.is_suspended = true;
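Unlike legacy_resume(), which this series deletes in favour of dpm_run_callback(), legacy_suspend() survives (see the hunk context above): old-style suspend callbacks also take the pm_message_t, so they cannot be stored in a pm_callback_t and still need their own wrapper. A sketch of the signature mismatch, with illustrative types only:

struct device;
typedef struct pm_message { int event; } pm_message_t;

typedef int (*pm_callback_t)(struct device *);          /* fits the common runner */
typedef int (*legacy_suspend_t)(struct device *,
                                pm_message_t);          /* does not */

/* Assumed shape, mirroring legacy_suspend(): the wrapper keeps its
 * own call site instead of feeding dpm_run_callback(). */
static int legacy_suspend_model(struct device *dev, pm_message_t state,
                                legacy_suspend_t cb)
{
        return cb(dev, state);
}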
@@ -1022,6 +980,8 @@ int dpm_suspend(pm_message_t state)
1022 */ 980 */
1023static int device_prepare(struct device *dev, pm_message_t state) 981static int device_prepare(struct device *dev, pm_message_t state)
1024{ 982{
983 int (*callback)(struct device *) = NULL;
984 char *info = NULL;
1025 int error = 0; 985 int error = 0;
1026 986
1027 device_lock(dev); 987 device_lock(dev);
@@ -1029,34 +989,29 @@ static int device_prepare(struct device *dev, pm_message_t state)
1029 dev->power.wakeup_path = device_may_wakeup(dev); 989 dev->power.wakeup_path = device_may_wakeup(dev);
1030 990
1031 if (dev->pm_domain) { 991 if (dev->pm_domain) {
1032 pm_dev_dbg(dev, state, "preparing power domain "); 992 info = "preparing power domain ";
1033 if (dev->pm_domain->ops.prepare) 993 callback = dev->pm_domain->ops.prepare;
1034 error = dev->pm_domain->ops.prepare(dev);
1035 suspend_report_result(dev->pm_domain->ops.prepare, error);
1036 if (error)
1037 goto End;
1038 } else if (dev->type && dev->type->pm) { 994 } else if (dev->type && dev->type->pm) {
1039 pm_dev_dbg(dev, state, "preparing type "); 995 info = "preparing type ";
1040 if (dev->type->pm->prepare) 996 callback = dev->type->pm->prepare;
1041 error = dev->type->pm->prepare(dev);
1042 suspend_report_result(dev->type->pm->prepare, error);
1043 if (error)
1044 goto End;
1045 } else if (dev->class && dev->class->pm) { 997 } else if (dev->class && dev->class->pm) {
1046 pm_dev_dbg(dev, state, "preparing class "); 998 info = "preparing class ";
1047 if (dev->class->pm->prepare) 999 callback = dev->class->pm->prepare;
1048 error = dev->class->pm->prepare(dev);
1049 suspend_report_result(dev->class->pm->prepare, error);
1050 if (error)
1051 goto End;
1052 } else if (dev->bus && dev->bus->pm) { 1000 } else if (dev->bus && dev->bus->pm) {
1053 pm_dev_dbg(dev, state, "preparing "); 1001 info = "preparing bus ";
1054 if (dev->bus->pm->prepare) 1002 callback = dev->bus->pm->prepare;
1055 error = dev->bus->pm->prepare(dev); 1003 }
1056 suspend_report_result(dev->bus->pm->prepare, error); 1004
1005 if (!callback && dev->driver && dev->driver->pm) {
1006 info = "preparing driver ";
1007 callback = dev->driver->pm->prepare;
1008 }
1009
1010 if (callback) {
1011 error = callback(dev);
1012 suspend_report_result(callback, error);
1057 } 1013 }
1058 1014
1059 End:
1060 device_unlock(dev); 1015 device_unlock(dev);
1061 1016
1062 return error; 1017 return error;
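The device_prepare() hunk above, like device_complete() earlier, picks the callback by direct member access rather than via pm_op(): ->prepare and ->complete do not vary with the transition, so no pm_message_t lookup is needed, and the four suspend_report_result() calls collapse into one. A stand-alone model of that shape, with simplified stand-in types:

struct device;
struct pm_ops { int (*prepare)(struct device *); };

struct device {
        const struct pm_ops *pm_domain;
        const struct pm_ops *type_pm;
        const struct pm_ops *class_pm;
        const struct pm_ops *bus_pm;
        const struct pm_ops *driver_pm;
};

static int model_prepare(struct device *dev)
{
        int (*callback)(struct device *) = NULL;

        if (dev->pm_domain)
                callback = dev->pm_domain->prepare;
        else if (dev->type_pm)
                callback = dev->type_pm->prepare;
        else if (dev->class_pm)
                callback = dev->class_pm->prepare;
        else if (dev->bus_pm)
                callback = dev->bus_pm->prepare;

        if (!callback && dev->driver_pm)
                callback = dev->driver_pm->prepare;

        /* single call site, single place to report the error */
        return callback ? callback(dev) : 0;
}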
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 86de6c50fc41..c5d358837461 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -47,21 +47,29 @@ static DEFINE_MUTEX(dev_pm_qos_mtx);
47static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); 47static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
48 48
49/** 49/**
50 * dev_pm_qos_read_value - Get PM QoS constraint for a given device. 50 * __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
51 * @dev: Device to get the PM QoS constraint value for.
52 *
53 * This routine must be called with dev->power.lock held.
54 */
55s32 __dev_pm_qos_read_value(struct device *dev)
56{
57 struct pm_qos_constraints *c = dev->power.constraints;
58
59 return c ? pm_qos_read_value(c) : 0;
60}
61
62/**
63 * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
51 * @dev: Device to get the PM QoS constraint value for. 64 * @dev: Device to get the PM QoS constraint value for.
52 */ 65 */
53s32 dev_pm_qos_read_value(struct device *dev) 66s32 dev_pm_qos_read_value(struct device *dev)
54{ 67{
55 struct pm_qos_constraints *c;
56 unsigned long flags; 68 unsigned long flags;
57 s32 ret = 0; 69 s32 ret;
58 70
59 spin_lock_irqsave(&dev->power.lock, flags); 71 spin_lock_irqsave(&dev->power.lock, flags);
60 72 ret = __dev_pm_qos_read_value(dev);
61 c = dev->power.constraints;
62 if (c)
63 ret = pm_qos_read_value(c);
64
65 spin_unlock_irqrestore(&dev->power.lock, flags); 73 spin_unlock_irqrestore(&dev->power.lock, flags);
66 74
67 return ret; 75 return ret;
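This hunk splits the reader into a lock-free __dev_pm_qos_read_value(), whose caller must already hold dev->power.lock, and a public wrapper that takes the lock itself; the runtime PM code below can then read the constraint from inside rpm_suspend() without re-acquiring the lock. A user-space model of the locked/unlocked API split, using a pthread mutex as a stand-in:

#include <pthread.h>

struct dev_power {
        pthread_mutex_t lock;
        int constraint;         /* 0 == no constraint registered */
};

/* Double-underscore variant: caller already holds p->lock. */
static int __read_value(struct dev_power *p)
{
        return p->constraint;
}

/* Public variant: takes and releases the lock itself. */
static int read_value(struct dev_power *p)
{
        int ret;

        pthread_mutex_lock(&p->lock);
        ret = __read_value(p);
        pthread_mutex_unlock(&p->lock);
        return ret;
}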
@@ -412,3 +420,28 @@ int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier)
412 return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier); 420 return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier);
413} 421}
414EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier); 422EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
423
424/**
425 * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
426 * @dev: Device whose ancestor to add the request for.
427 * @req: Pointer to the preallocated handle.
428 * @value: Constraint latency value.
429 */
430int dev_pm_qos_add_ancestor_request(struct device *dev,
431 struct dev_pm_qos_request *req, s32 value)
432{
433 struct device *ancestor = dev->parent;
434 int error = -ENODEV;
435
436 while (ancestor && !ancestor->power.ignore_children)
437 ancestor = ancestor->parent;
438
439 if (ancestor)
440 error = dev_pm_qos_add_request(ancestor, req, value);
441
442 if (error)
443 req->dev = NULL;
444
445 return error;
446}
447EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
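The walk in dev_pm_qos_add_ancestor_request() stops at the closest ancestor that has power.ignore_children set, i.e. one whose runtime PM is not kept active by child activity and so needs the latency request attached explicitly; on failure req->dev is cleared so a later removal is harmless. Modelled stand-alone, with hypothetical names:

#include <stddef.h>
#include <errno.h>

struct node {
        struct node *parent;
        int ignore_children;    /* runtime PM ignores child activity */
};

struct request { struct node *dev; };

static int add_ancestor_request(struct node *dev, struct request *req)
{
        struct node *ancestor = dev->parent;
        int error = -ENODEV;

        /* skip ancestors whose runtime PM already tracks children */
        while (ancestor && !ancestor->ignore_children)
                ancestor = ancestor->parent;

        if (ancestor)
                error = 0;      /* kernel: dev_pm_qos_add_request() */

        req->dev = error ? NULL : ancestor;
        return error;
}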
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 8c78443bca8f..541f821d4ea6 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -250,6 +250,9 @@ static int rpm_idle(struct device *dev, int rpmflags)
250 else 250 else
251 callback = NULL; 251 callback = NULL;
252 252
253 if (!callback && dev->driver && dev->driver->pm)
254 callback = dev->driver->pm->runtime_idle;
255
253 if (callback) 256 if (callback)
254 __rpm_callback(callback, dev); 257 __rpm_callback(callback, dev);
255 258
@@ -279,6 +282,47 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
279 return retval != -EACCES ? retval : -EIO; 282 return retval != -EACCES ? retval : -EIO;
280} 283}
281 284
285struct rpm_qos_data {
286 ktime_t time_now;
287 s64 constraint_ns;
288};
289
290/**
291 * rpm_update_qos_constraint - Update a given PM QoS constraint data.
292 * @dev: Device whose timing data to use.
293 * @data: PM QoS constraint data to update.
294 *
295 * Use the suspend timing data of @dev to update PM QoS constraint data pointed
296 * to by @data.
297 */
298static int rpm_update_qos_constraint(struct device *dev, void *data)
299{
300 struct rpm_qos_data *qos = data;
301 unsigned long flags;
302 s64 delta_ns;
303 int ret = 0;
304
305 spin_lock_irqsave(&dev->power.lock, flags);
306
307 if (dev->power.max_time_suspended_ns < 0)
308 goto out;
309
310 delta_ns = dev->power.max_time_suspended_ns -
311 ktime_to_ns(ktime_sub(qos->time_now, dev->power.suspend_time));
312 if (delta_ns <= 0) {
313 ret = -EBUSY;
314 goto out;
315 }
316
317 if (qos->constraint_ns > delta_ns || qos->constraint_ns == 0)
318 qos->constraint_ns = delta_ns;
319
320 out:
321 spin_unlock_irqrestore(&dev->power.lock, flags);
322
323 return ret;
324}
325
282/** 326/**
283 * rpm_suspend - Carry out runtime suspend of given device. 327 * rpm_suspend - Carry out runtime suspend of given device.
284 * @dev: Device to suspend. 328 * @dev: Device to suspend.
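rpm_update_qos_constraint() computes each child's remaining suspend budget, max_time_suspended_ns minus the time already spent suspended, and min-aggregates it into the parent's constraint; a non-positive remainder aborts the parent's suspend with -EBUSY. The arithmetic in isolation, as a plain-C sketch with hypothetical field names:

#include <errno.h>
#include <stdint.h>

struct child_pm {
        int64_t max_time_suspended_ns;  /* < 0: child imposes no limit */
        int64_t suspend_time_ns;        /* when the child suspended */
};

/* Min-aggregate one child's remaining budget into *constraint_ns
 * (0 there means "nothing collected yet"). Returns -EBUSY when the
 * child's budget is already used up. */
static int update_constraint(const struct child_pm *c, int64_t now_ns,
                             int64_t *constraint_ns)
{
        int64_t delta_ns;

        if (c->max_time_suspended_ns < 0)
                return 0;               /* unconstrained child */

        delta_ns = c->max_time_suspended_ns -
                   (now_ns - c->suspend_time_ns);
        if (delta_ns <= 0)
                return -EBUSY;

        if (*constraint_ns == 0 || *constraint_ns > delta_ns)
                *constraint_ns = delta_ns;

        return 0;
}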
@@ -305,6 +349,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
305{ 349{
306 int (*callback)(struct device *); 350 int (*callback)(struct device *);
307 struct device *parent = NULL; 351 struct device *parent = NULL;
352 struct rpm_qos_data qos;
308 int retval; 353 int retval;
309 354
310 trace_rpm_suspend(dev, rpmflags); 355 trace_rpm_suspend(dev, rpmflags);
@@ -400,8 +445,38 @@ static int rpm_suspend(struct device *dev, int rpmflags)
400 goto out; 445 goto out;
401 } 446 }
402 447
448 qos.constraint_ns = __dev_pm_qos_read_value(dev);
449 if (qos.constraint_ns < 0) {
450 /* Negative constraint means "never suspend". */
451 retval = -EPERM;
452 goto out;
453 }
454 qos.constraint_ns *= NSEC_PER_USEC;
455 qos.time_now = ktime_get();
456
403 __update_runtime_status(dev, RPM_SUSPENDING); 457 __update_runtime_status(dev, RPM_SUSPENDING);
404 458
459 if (!dev->power.ignore_children) {
460 if (dev->power.irq_safe)
461 spin_unlock(&dev->power.lock);
462 else
463 spin_unlock_irq(&dev->power.lock);
464
465 retval = device_for_each_child(dev, &qos,
466 rpm_update_qos_constraint);
467
468 if (dev->power.irq_safe)
469 spin_lock(&dev->power.lock);
470 else
471 spin_lock_irq(&dev->power.lock);
472
473 if (retval)
474 goto fail;
475 }
476
477 dev->power.suspend_time = qos.time_now;
478 dev->power.max_time_suspended_ns = qos.constraint_ns ? : -1;
479
405 if (dev->pm_domain) 480 if (dev->pm_domain)
406 callback = dev->pm_domain->ops.runtime_suspend; 481 callback = dev->pm_domain->ops.runtime_suspend;
407 else if (dev->type && dev->type->pm) 482 else if (dev->type && dev->type->pm)
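Two things are grafted into rpm_suspend() here: a QoS gate (a negative per-device constraint means "never suspend" and yields -EPERM; a positive one arrives in microseconds and is scaled by NSEC_PER_USEC), and, unless power.ignore_children is set, an iteration over the children during which power.lock must be dropped, using the plain or _irq spinlock variants depending on power.irq_safe. The gate, modelled in plain C:

#include <errno.h>
#include <stdint.h>

#define NSEC_PER_USEC 1000LL

/* Sketch of the new rpm_suspend() prologue: reject a negative
 * constraint outright, scale a positive one from us to ns;
 * 0 continues to mean "no constraint". */
static int qos_gate(int32_t constraint_us, int64_t *constraint_ns)
{
        if (constraint_us < 0)
                return -EPERM;          /* "never suspend" */

        *constraint_ns = (int64_t)constraint_us * NSEC_PER_USEC;
        return 0;
}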
@@ -413,28 +488,13 @@ static int rpm_suspend(struct device *dev, int rpmflags)
413 else 488 else
414 callback = NULL; 489 callback = NULL;
415 490
491 if (!callback && dev->driver && dev->driver->pm)
492 callback = dev->driver->pm->runtime_suspend;
493
416 retval = rpm_callback(callback, dev); 494 retval = rpm_callback(callback, dev);
417 if (retval) { 495 if (retval)
418 __update_runtime_status(dev, RPM_ACTIVE); 496 goto fail;
419 dev->power.deferred_resume = false;
420 if (retval == -EAGAIN || retval == -EBUSY) {
421 dev->power.runtime_error = 0;
422 497
423 /*
424 * If the callback routine failed an autosuspend, and
425 * if the last_busy time has been updated so that there
426 * is a new autosuspend expiration time, automatically
427 * reschedule another autosuspend.
428 */
429 if ((rpmflags & RPM_AUTO) &&
430 pm_runtime_autosuspend_expiration(dev) != 0)
431 goto repeat;
432 } else {
433 pm_runtime_cancel_pending(dev);
434 }
435 wake_up_all(&dev->power.wait_queue);
436 goto out;
437 }
438 no_callback: 498 no_callback:
439 __update_runtime_status(dev, RPM_SUSPENDED); 499 __update_runtime_status(dev, RPM_SUSPENDED);
440 pm_runtime_deactivate_timer(dev); 500 pm_runtime_deactivate_timer(dev);
@@ -466,6 +526,29 @@ static int rpm_suspend(struct device *dev, int rpmflags)
466 trace_rpm_return_int(dev, _THIS_IP_, retval); 526 trace_rpm_return_int(dev, _THIS_IP_, retval);
467 527
468 return retval; 528 return retval;
529
530 fail:
531 __update_runtime_status(dev, RPM_ACTIVE);
532 dev->power.suspend_time = ktime_set(0, 0);
533 dev->power.max_time_suspended_ns = -1;
534 dev->power.deferred_resume = false;
535 if (retval == -EAGAIN || retval == -EBUSY) {
536 dev->power.runtime_error = 0;
537
538 /*
539 * If the callback routine failed an autosuspend, and
540 * if the last_busy time has been updated so that there
541 * is a new autosuspend expiration time, automatically
542 * reschedule another autosuspend.
543 */
544 if ((rpmflags & RPM_AUTO) &&
545 pm_runtime_autosuspend_expiration(dev) != 0)
546 goto repeat;
547 } else {
548 pm_runtime_cancel_pending(dev);
549 }
550 wake_up_all(&dev->power.wait_queue);
551 goto out;
469} 552}
470 553
471/** 554/**
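The new fail: label consolidates the error handling that previously sat inline after rpm_callback(), and additionally resets the suspend_time/max_time_suspended_ns bookkeeping introduced above. The transient-versus-fatal split it preserves, modelled stand-alone with hypothetical names:

#include <errno.h>
#include <stdbool.h>

struct rpm {
        int runtime_error;
        bool pending_cancelled;
};

/* -EAGAIN/-EBUSY are transient: runtime_error stays clear so later
 * attempts are allowed (the kernel may even reschedule an expired
 * autosuspend); any other error cancels pending runtime PM work. */
static void handle_failure(struct rpm *r, int retval)
{
        if (retval == -EAGAIN || retval == -EBUSY)
                r->runtime_error = 0;
        else
                r->pending_cancelled = true;
}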
@@ -620,6 +703,9 @@ static int rpm_resume(struct device *dev, int rpmflags)
620 if (dev->power.no_callbacks) 703 if (dev->power.no_callbacks)
621 goto no_callback; /* Assume success. */ 704 goto no_callback; /* Assume success. */
622 705
706 dev->power.suspend_time = ktime_set(0, 0);
707 dev->power.max_time_suspended_ns = -1;
708
623 __update_runtime_status(dev, RPM_RESUMING); 709 __update_runtime_status(dev, RPM_RESUMING);
624 710
625 if (dev->pm_domain) 711 if (dev->pm_domain)
@@ -633,6 +719,9 @@ static int rpm_resume(struct device *dev, int rpmflags)
633 else 719 else
634 callback = NULL; 720 callback = NULL;
635 721
722 if (!callback && dev->driver && dev->driver->pm)
723 callback = dev->driver->pm->runtime_resume;
724
636 retval = rpm_callback(callback, dev); 725 retval = rpm_callback(callback, dev);
637 if (retval) { 726 if (retval) {
638 __update_runtime_status(dev, RPM_SUSPENDED); 727 __update_runtime_status(dev, RPM_SUSPENDED);
@@ -1279,6 +1368,9 @@ void pm_runtime_init(struct device *dev)
1279 setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn, 1368 setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
1280 (unsigned long)dev); 1369 (unsigned long)dev);
1281 1370
1371 dev->power.suspend_time = ktime_set(0, 0);
1372 dev->power.max_time_suspended_ns = -1;
1373
1282 init_waitqueue_head(&dev->power.wait_queue); 1374 init_waitqueue_head(&dev->power.wait_queue);
1283} 1375}
1284 1376
@@ -1296,3 +1388,28 @@ void pm_runtime_remove(struct device *dev)
1296 if (dev->power.irq_safe && dev->parent) 1388 if (dev->power.irq_safe && dev->parent)
1297 pm_runtime_put_sync(dev->parent); 1389 pm_runtime_put_sync(dev->parent);
1298} 1390}
1391
1392/**
1393 * pm_runtime_update_max_time_suspended - Update device's suspend time data.
1394 * @dev: Device to handle.
1395 * @delta_ns: Value to subtract from the device's max_time_suspended_ns field.
1396 *
1397 * Update the device's power.max_time_suspended_ns field by subtracting
1398 * @delta_ns from it. The resulting value of power.max_time_suspended_ns is
1399 * never negative.
1400 */
1401void pm_runtime_update_max_time_suspended(struct device *dev, s64 delta_ns)
1402{
1403 unsigned long flags;
1404
1405 spin_lock_irqsave(&dev->power.lock, flags);
1406
1407 if (delta_ns > 0 && dev->power.max_time_suspended_ns > 0) {
1408 if (dev->power.max_time_suspended_ns > delta_ns)
1409 dev->power.max_time_suspended_ns -= delta_ns;
1410 else
1411 dev->power.max_time_suspended_ns = 0;
1412 }
1413
1414 spin_unlock_irqrestore(&dev->power.lock, flags);
1415}
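pm_runtime_update_max_time_suspended() only touches the budget when both delta_ns and the current value are positive: a negative budget encodes "no limit" and must stay negative, and the subtraction saturates at zero, as the kdoc above promises. The arithmetic in isolation, as an illustrative sketch:

#include <stdint.h>

/* budget < 0 means "no limit" and is never modified; otherwise
 * subtract delta, saturating at 0 rather than going negative. */
static void update_budget(int64_t *budget_ns, int64_t delta_ns)
{
        if (delta_ns > 0 && *budget_ns > 0) {
                if (*budget_ns > delta_ns)
                        *budget_ns -= delta_ns;
                else
                        *budget_ns = 0;
        }
}

For example, with a budget of 300 ns, update_budget(&budget, 500) leaves the budget at 0 rather than -200, and a budget of -1 is returned untouched.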