author		Rafael J. Wysocki <rjw@sisk.pl>	2011-12-25 17:43:11 -0500
committer	Rafael J. Wysocki <rjw@sisk.pl>	2011-12-25 17:43:11 -0500
commit		6d10463b2fa1b6b81091661c1917f26436b38c53 (patch)
tree		1fb5be10a08a3178fb644c9eb5a2a31423985cfa /drivers/base/power
parent		0015afaa1f818d38ea9f8e81a84a6aeeca5fdaf0 (diff)
parent		a8cf27bee7adc40d91956cf1b9e44d7001f93aba (diff)
Merge branch 'pm-domains' into pm-for-linus
* pm-domains:
  PM / shmobile: Allow the A4R domain to be turned off at run time
  PM / input / touchscreen: Make st1232 use device PM QoS constraints
  PM / QoS: Introduce dev_pm_qos_add_ancestor_request()
  PM / shmobile: Remove the stay_on flag from SH7372's PM domains
  PM / shmobile: Don't include SH7372's INTCS in syscore suspend/resume
  PM / shmobile: Add support for the sh7372 A4S power domain / sleep mode
  ARM: S3C64XX: Implement basic power domain support
  PM / shmobile: Use common always on power domain governor
  PM / Domains: Provide an always on power domain governor
  PM / Domains: Fix default system suspend/resume operations
  PM / Domains: Make it possible to assign names to generic PM domains
  PM / Domains: fix compilation failure for CONFIG_PM_GENERIC_DOMAINS unset
  PM / Domains: Automatically update overoptimistic latency information
  PM / Domains: Add default power off governor function (v4)
  PM / Domains: Add device stop governor function (v4)
  PM / Domains: Rework system suspend callback routines (v2)
  PM / Domains: Introduce "save/restore state" device callbacks
  PM / Domains: Make it possible to use per-device domain callbacks
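
The per-device callback work pulled in here ("PM / Domains: Introduce "save/restore state" device callbacks", "PM / Domains: Make it possible to use per-device domain callbacks") lets a single device override the domain-wide dev_ops and carry its own timing data for the new governors. A minimal sketch of how a driver could hook in, built only on the pm_genpd_add_callbacks() helper and the gpd_dev_ops/gpd_timing_data fields visible in the diffs below; the foo_* names and latency numbers are made up, and the call assumes the platform has already added the device to one of its domains:

#include <linux/platform_device.h>
#include <linux/pm_domain.h>

/* Hypothetical per-device hooks; only the callbacks this device needs. */
static int foo_save_state(struct device *dev)
{
        /* Save device registers before the domain removes power. */
        return 0;
}

static int foo_restore_state(struct device *dev)
{
        /* Reprogram the device after the domain is powered up again. */
        return 0;
}

static struct gpd_dev_ops foo_gpd_ops = {
        .save_state    = foo_save_state,
        .restore_state = foo_restore_state,
};

/* Worst-case latencies for this device (illustrative numbers). */
static struct gpd_timing_data foo_gpd_td = {
        .stop_latency_ns          = 20000,
        .start_latency_ns         = 50000,
        .save_state_latency_ns    = 100000,
        .restore_state_latency_ns = 100000,
        .break_even_ns            = 200000,
};

static int foo_probe(struct platform_device *pdev)
{
        /*
         * Attach the callbacks and timing data to this one device; the
         * platform code is assumed to have added the device to its PM
         * domain already, otherwise this returns -EINVAL.
         */
        return pm_genpd_add_callbacks(&pdev->dev, &foo_gpd_ops, &foo_gpd_td);
}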
Diffstat (limited to 'drivers/base/power')
-rw-r--r--	drivers/base/power/Makefile	2
-rw-r--r--	drivers/base/power/domain.c	539
-rw-r--r--	drivers/base/power/domain_governor.c	156
-rw-r--r--	drivers/base/power/qos.c	25
4 files changed, 576 insertions, 146 deletions
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 81676dd17900..2e58ebb1f6c0 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -3,7 +3,7 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
 obj-$(CONFIG_PM_RUNTIME)	+= runtime.o
 obj-$(CONFIG_PM_TRACE_RTC)	+= trace.o
 obj-$(CONFIG_PM_OPP)	+= opp.o
-obj-$(CONFIG_PM_GENERIC_DOMAINS)	+= domain.o
+obj-$(CONFIG_PM_GENERIC_DOMAINS)	+= domain.o domain_governor.o
 obj-$(CONFIG_HAVE_CLK)	+= clock_ops.o
 
 ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 6790cf7eba5a..92e6a9048065 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -15,13 +15,44 @@
 #include <linux/err.h>
 #include <linux/sched.h>
 #include <linux/suspend.h>
+#include <linux/export.h>
+
+#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)			\
+({									\
+	type (*__routine)(struct device *__d);				\
+	type __ret = (type)0;						\
+									\
+	__routine = genpd->dev_ops.callback;				\
+	if (__routine) {						\
+		__ret = __routine(dev);					\
+	} else {							\
+		__routine = dev_gpd_data(dev)->ops.callback;		\
+		if (__routine)						\
+			__ret = __routine(dev);				\
+	}								\
+	__ret;								\
+})
+
+#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name)	\
+({										\
+	ktime_t __start = ktime_get();						\
+	type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev);	\
+	s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start));		\
+	struct generic_pm_domain_data *__gpd_data = dev_gpd_data(dev);		\
+	if (__elapsed > __gpd_data->td.field) {					\
+		__gpd_data->td.field = __elapsed;				\
+		dev_warn(dev, name " latency exceeded, new value %lld ns\n",	\
+			__elapsed);						\
+	}									\
+	__retval;								\
+})
 
 static LIST_HEAD(gpd_list);
 static DEFINE_MUTEX(gpd_list_lock);
 
 #ifdef CONFIG_PM
 
-static struct generic_pm_domain *dev_to_genpd(struct device *dev)
+struct generic_pm_domain *dev_to_genpd(struct device *dev)
 {
 	if (IS_ERR_OR_NULL(dev->pm_domain))
 		return ERR_PTR(-EINVAL);
@@ -29,6 +60,31 @@ static struct generic_pm_domain *dev_to_genpd(struct device *dev)
 	return pd_to_genpd(dev->pm_domain);
 }
 
+static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
+					stop_latency_ns, "stop");
+}
+
+static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
+					start_latency_ns, "start");
+}
+
+static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
+					save_state_latency_ns, "state save");
+}
+
+static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
+					restore_state_latency_ns,
+					"state restore");
+}
+
 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
 {
 	bool ret = false;
@@ -145,9 +201,21 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
 	}
 
 	if (genpd->power_on) {
+		ktime_t time_start = ktime_get();
+		s64 elapsed_ns;
+
 		ret = genpd->power_on(genpd);
 		if (ret)
 			goto err;
+
+		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+		if (elapsed_ns > genpd->power_on_latency_ns) {
+			genpd->power_on_latency_ns = elapsed_ns;
+			if (genpd->name)
+				pr_warning("%s: Power-on latency exceeded, "
+					"new value %lld ns\n", genpd->name,
+					elapsed_ns);
+		}
 	}
 
 	genpd_set_active(genpd);
@@ -190,7 +258,6 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd,
 {
 	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
 	struct device *dev = pdd->dev;
-	struct device_driver *drv = dev->driver;
 	int ret = 0;
 
 	if (gpd_data->need_restore)
@@ -198,15 +265,9 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd,
 
 	mutex_unlock(&genpd->lock);
 
-	if (drv && drv->pm && drv->pm->runtime_suspend) {
-		if (genpd->start_device)
-			genpd->start_device(dev);
-
-		ret = drv->pm->runtime_suspend(dev);
-
-		if (genpd->stop_device)
-			genpd->stop_device(dev);
-	}
+	genpd_start_dev(genpd, dev);
+	ret = genpd_save_dev(genpd, dev);
+	genpd_stop_dev(genpd, dev);
 
 	mutex_lock(&genpd->lock);
 
@@ -227,22 +288,15 @@ static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
 {
 	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
 	struct device *dev = pdd->dev;
-	struct device_driver *drv = dev->driver;
 
 	if (!gpd_data->need_restore)
 		return;
 
 	mutex_unlock(&genpd->lock);
 
-	if (drv && drv->pm && drv->pm->runtime_resume) {
-		if (genpd->start_device)
-			genpd->start_device(dev);
-
-		drv->pm->runtime_resume(dev);
-
-		if (genpd->stop_device)
-			genpd->stop_device(dev);
-	}
+	genpd_start_dev(genpd, dev);
+	genpd_restore_dev(genpd, dev);
+	genpd_stop_dev(genpd, dev);
 
 	mutex_lock(&genpd->lock);
 
@@ -354,11 +408,16 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 	}
 
 	if (genpd->power_off) {
+		ktime_t time_start;
+		s64 elapsed_ns;
+
 		if (atomic_read(&genpd->sd_count) > 0) {
 			ret = -EBUSY;
 			goto out;
 		}
 
+		time_start = ktime_get();
+
 		/*
 		 * If sd_count > 0 at this point, one of the subdomains hasn't
 		 * managed to call pm_genpd_poweron() for the master yet after
@@ -372,9 +431,29 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 			genpd_set_active(genpd);
 			goto out;
 		}
+
+		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+		if (elapsed_ns > genpd->power_off_latency_ns) {
+			genpd->power_off_latency_ns = elapsed_ns;
+			if (genpd->name)
+				pr_warning("%s: Power-off latency exceeded, "
+					"new value %lld ns\n", genpd->name,
+					elapsed_ns);
+		}
 	}
 
 	genpd->status = GPD_STATE_POWER_OFF;
+	genpd->power_off_time = ktime_get();
+
+	/* Update PM QoS information for devices in the domain. */
+	list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
+		struct gpd_timing_data *td = &to_gpd_data(pdd)->td;
+
+		pm_runtime_update_max_time_suspended(pdd->dev,
+					td->start_latency_ns +
+					td->restore_state_latency_ns +
+					genpd->power_on_latency_ns);
+	}
 
 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
 		genpd_sd_counter_dec(link->master);
@@ -413,6 +492,8 @@ static void genpd_power_off_work_fn(struct work_struct *work)
 static int pm_genpd_runtime_suspend(struct device *dev)
 {
 	struct generic_pm_domain *genpd;
+	bool (*stop_ok)(struct device *__dev);
+	int ret;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
@@ -422,11 +503,16 @@ static int pm_genpd_runtime_suspend(struct device *dev)
 
 	might_sleep_if(!genpd->dev_irq_safe);
 
-	if (genpd->stop_device) {
-		int ret = genpd->stop_device(dev);
-		if (ret)
-			return ret;
-	}
+	stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
+	if (stop_ok && !stop_ok(dev))
+		return -EBUSY;
+
+	ret = genpd_stop_dev(genpd, dev);
+	if (ret)
+		return ret;
+
+	pm_runtime_update_max_time_suspended(dev,
+				dev_gpd_data(dev)->td.start_latency_ns);
 
 	/*
 	 * If power.irq_safe is set, this routine will be run with interrupts
@@ -502,8 +588,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
 	mutex_unlock(&genpd->lock);
 
  out:
-	if (genpd->start_device)
-		genpd->start_device(dev);
+	genpd_start_dev(genpd, dev);
 
 	return 0;
 }
@@ -534,6 +619,52 @@ static inline void genpd_power_off_work_fn(struct work_struct *work) {}
 
 #ifdef CONFIG_PM_SLEEP
 
+static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
+				    struct device *dev)
+{
+	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
+}
+
+static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_CALLBACK(genpd, int, suspend, dev);
+}
+
+static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev);
+}
+
+static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev);
+}
+
+static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_CALLBACK(genpd, int, resume, dev);
+}
+
+static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_CALLBACK(genpd, int, freeze, dev);
+}
+
+static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev);
+}
+
+static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev);
+}
+
+static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_CALLBACK(genpd, int, thaw, dev);
+}
+
 /**
  * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
  * @genpd: PM domain to power off, if possible.
@@ -590,7 +721,7 @@ static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
 	if (!device_can_wakeup(dev))
 		return false;
 
-	active_wakeup = genpd->active_wakeup && genpd->active_wakeup(dev);
+	active_wakeup = genpd_dev_active_wakeup(genpd, dev);
 	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
 }
 
@@ -646,7 +777,7 @@ static int pm_genpd_prepare(struct device *dev)
 	/*
 	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
 	 * so pm_genpd_poweron() will return immediately, but if the device
-	 * is suspended (e.g. it's been stopped by .stop_device()), we need
+	 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
 	 * to make it operational.
 	 */
 	pm_runtime_resume(dev);
@@ -685,7 +816,7 @@ static int pm_genpd_suspend(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
+	return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev);
 }
 
 /**
@@ -710,16 +841,14 @@ static int pm_genpd_suspend_noirq(struct device *dev)
 	if (genpd->suspend_power_off)
 		return 0;
 
-	ret = pm_generic_suspend_noirq(dev);
+	ret = genpd_suspend_late(genpd, dev);
 	if (ret)
 		return ret;
 
-	if (dev->power.wakeup_path
-	    && genpd->active_wakeup && genpd->active_wakeup(dev))
+	if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
 		return 0;
 
-	if (genpd->stop_device)
-		genpd->stop_device(dev);
+	genpd_stop_dev(genpd, dev);
 
 	/*
 	 * Since all of the "noirq" callbacks are executed sequentially, it is
@@ -761,10 +890,9 @@ static int pm_genpd_resume_noirq(struct device *dev)
 	 */
 	pm_genpd_poweron(genpd);
 	genpd->suspended_count--;
-	if (genpd->start_device)
-		genpd->start_device(dev);
+	genpd_start_dev(genpd, dev);
 
-	return pm_generic_resume_noirq(dev);
+	return genpd_resume_early(genpd, dev);
 }
 
 /**
@@ -785,7 +913,7 @@ static int pm_genpd_resume(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
+	return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev);
 }
 
 /**
@@ -806,7 +934,7 @@ static int pm_genpd_freeze(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
+	return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev);
 }
 
 /**
@@ -832,12 +960,11 @@ static int pm_genpd_freeze_noirq(struct device *dev)
 	if (genpd->suspend_power_off)
 		return 0;
 
-	ret = pm_generic_freeze_noirq(dev);
+	ret = genpd_freeze_late(genpd, dev);
 	if (ret)
 		return ret;
 
-	if (genpd->stop_device)
-		genpd->stop_device(dev);
+	genpd_stop_dev(genpd, dev);
 
 	return 0;
 }
@@ -864,10 +991,9 @@ static int pm_genpd_thaw_noirq(struct device *dev)
 	if (genpd->suspend_power_off)
 		return 0;
 
-	if (genpd->start_device)
-		genpd->start_device(dev);
+	genpd_start_dev(genpd, dev);
 
-	return pm_generic_thaw_noirq(dev);
+	return genpd_thaw_early(genpd, dev);
 }
 
 /**
@@ -888,72 +1014,7 @@ static int pm_genpd_thaw(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
-}
-
-/**
- * pm_genpd_dev_poweroff - Power off a device belonging to an I/O PM domain.
- * @dev: Device to suspend.
- *
- * Power off a device under the assumption that its pm_domain field points to
- * the domain member of an object of type struct generic_pm_domain representing
- * a PM domain consisting of I/O devices.
- */
-static int pm_genpd_dev_poweroff(struct device *dev)
-{
-	struct generic_pm_domain *genpd;
-
-	dev_dbg(dev, "%s()\n", __func__);
-
-	genpd = dev_to_genpd(dev);
-	if (IS_ERR(genpd))
-		return -EINVAL;
-
-	return genpd->suspend_power_off ? 0 : pm_generic_poweroff(dev);
-}
-
-/**
- * pm_genpd_dev_poweroff_noirq - Late power off of a device from a PM domain.
- * @dev: Device to suspend.
- *
- * Carry out a late powering off of a device under the assumption that its
- * pm_domain field points to the domain member of an object of type
- * struct generic_pm_domain representing a PM domain consisting of I/O devices.
- */
-static int pm_genpd_dev_poweroff_noirq(struct device *dev)
-{
-	struct generic_pm_domain *genpd;
-	int ret;
-
-	dev_dbg(dev, "%s()\n", __func__);
-
-	genpd = dev_to_genpd(dev);
-	if (IS_ERR(genpd))
-		return -EINVAL;
-
-	if (genpd->suspend_power_off)
-		return 0;
-
-	ret = pm_generic_poweroff_noirq(dev);
-	if (ret)
-		return ret;
-
-	if (dev->power.wakeup_path
-	    && genpd->active_wakeup && genpd->active_wakeup(dev))
-		return 0;
-
-	if (genpd->stop_device)
-		genpd->stop_device(dev);
-
-	/*
-	 * Since all of the "noirq" callbacks are executed sequentially, it is
-	 * guaranteed that this function will never run twice in parallel for
-	 * the same PM domain, so it is not necessary to use locking here.
-	 */
-	genpd->suspended_count++;
-	pm_genpd_sync_poweroff(genpd);
-
-	return 0;
+	return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev);
 }
 
 /**
@@ -993,31 +1054,9 @@ static int pm_genpd_restore_noirq(struct device *dev)
 
 	pm_genpd_poweron(genpd);
 	genpd->suspended_count--;
-	if (genpd->start_device)
-		genpd->start_device(dev);
-
-	return pm_generic_restore_noirq(dev);
-}
-
-/**
- * pm_genpd_restore - Restore a device belonging to an I/O power domain.
- * @dev: Device to resume.
- *
- * Restore a device under the assumption that its pm_domain field points to the
- * domain member of an object of type struct generic_pm_domain representing
- * a power domain consisting of I/O devices.
- */
-static int pm_genpd_restore(struct device *dev)
-{
-	struct generic_pm_domain *genpd;
-
-	dev_dbg(dev, "%s()\n", __func__);
-
-	genpd = dev_to_genpd(dev);
-	if (IS_ERR(genpd))
-		return -EINVAL;
+	genpd_start_dev(genpd, dev);
 
-	return genpd->suspend_power_off ? 0 : pm_generic_restore(dev);
+	return genpd_resume_early(genpd, dev);
 }
 
 /**
@@ -1067,20 +1106,19 @@ static void pm_genpd_complete(struct device *dev)
 #define pm_genpd_freeze_noirq NULL
 #define pm_genpd_thaw_noirq NULL
 #define pm_genpd_thaw NULL
-#define pm_genpd_dev_poweroff_noirq NULL
-#define pm_genpd_dev_poweroff NULL
 #define pm_genpd_restore_noirq NULL
-#define pm_genpd_restore NULL
 #define pm_genpd_complete NULL
 
 #endif /* CONFIG_PM_SLEEP */
 
 /**
- * pm_genpd_add_device - Add a device to an I/O PM domain.
+ * __pm_genpd_add_device - Add a device to an I/O PM domain.
  * @genpd: PM domain to add the device to.
  * @dev: Device to be added.
+ * @td: Set of PM QoS timing parameters to attach to the device.
  */
-int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
+int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
+			  struct gpd_timing_data *td)
 {
 	struct generic_pm_domain_data *gpd_data;
 	struct pm_domain_data *pdd;
@@ -1123,6 +1161,8 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
 	gpd_data->base.dev = dev;
 	gpd_data->need_restore = false;
 	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
+	if (td)
+		gpd_data->td = *td;
 
  out:
 	genpd_release_lock(genpd);
@@ -1280,6 +1320,204 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 }
 
 /**
+ * pm_genpd_add_callbacks - Add PM domain callbacks to a given device.
+ * @dev: Device to add the callbacks to.
+ * @ops: Set of callbacks to add.
+ * @td: Timing data to add to the device along with the callbacks (optional).
+ */
+int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
+			   struct gpd_timing_data *td)
+{
+	struct pm_domain_data *pdd;
+	int ret = 0;
+
+	if (!(dev && dev->power.subsys_data && ops))
+		return -EINVAL;
+
+	pm_runtime_disable(dev);
+	device_pm_lock();
+
+	pdd = dev->power.subsys_data->domain_data;
+	if (pdd) {
+		struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+
+		gpd_data->ops = *ops;
+		if (td)
+			gpd_data->td = *td;
+	} else {
+		ret = -EINVAL;
+	}
+
+	device_pm_unlock();
+	pm_runtime_enable(dev);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
+
+/**
+ * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
+ * @dev: Device to remove the callbacks from.
+ * @clear_td: If set, clear the device's timing data too.
+ */
+int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
+{
+	struct pm_domain_data *pdd;
+	int ret = 0;
+
+	if (!(dev && dev->power.subsys_data))
+		return -EINVAL;
+
+	pm_runtime_disable(dev);
+	device_pm_lock();
+
+	pdd = dev->power.subsys_data->domain_data;
+	if (pdd) {
+		struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+
+		gpd_data->ops = (struct gpd_dev_ops){ 0 };
+		if (clear_td)
+			gpd_data->td = (struct gpd_timing_data){ 0 };
+	} else {
+		ret = -EINVAL;
+	}
+
+	device_pm_unlock();
+	pm_runtime_enable(dev);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
+
+/* Default device callbacks for generic PM domains. */
+
+/**
+ * pm_genpd_default_save_state - Default "save device state" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_save_state(struct device *dev)
+{
+	int (*cb)(struct device *__dev);
+	struct device_driver *drv = dev->driver;
+
+	cb = dev_gpd_data(dev)->ops.save_state;
+	if (cb)
+		return cb(dev);
+
+	if (drv && drv->pm && drv->pm->runtime_suspend)
+		return drv->pm->runtime_suspend(dev);
+
+	return 0;
+}
+
+/**
+ * pm_genpd_default_restore_state - Default PM domains "restore device state".
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_restore_state(struct device *dev)
+{
+	int (*cb)(struct device *__dev);
+	struct device_driver *drv = dev->driver;
+
+	cb = dev_gpd_data(dev)->ops.restore_state;
+	if (cb)
+		return cb(dev);
+
+	if (drv && drv->pm && drv->pm->runtime_resume)
+		return drv->pm->runtime_resume(dev);
+
+	return 0;
+}
+
+/**
+ * pm_genpd_default_suspend - Default "device suspend" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_suspend(struct device *dev)
+{
+	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend;
+
+	return cb ? cb(dev) : pm_generic_suspend(dev);
+}
+
+/**
+ * pm_genpd_default_suspend_late - Default "late device suspend" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_suspend_late(struct device *dev)
+{
+	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;
+
+	return cb ? cb(dev) : pm_generic_suspend_noirq(dev);
+}
+
+/**
+ * pm_genpd_default_resume_early - Default "early device resume" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_resume_early(struct device *dev)
+{
+	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;
+
+	return cb ? cb(dev) : pm_generic_resume_noirq(dev);
+}
+
+/**
+ * pm_genpd_default_resume - Default "device resume" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_resume(struct device *dev)
+{
+	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume;
+
+	return cb ? cb(dev) : pm_generic_resume(dev);
+}
+
+/**
+ * pm_genpd_default_freeze - Default "device freeze" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_freeze(struct device *dev)
+{
+	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;
+
+	return cb ? cb(dev) : pm_generic_freeze(dev);
+}
+
+/**
+ * pm_genpd_default_freeze_late - Default "late device freeze" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_freeze_late(struct device *dev)
+{
+	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
+
+	return cb ? cb(dev) : pm_generic_freeze_noirq(dev);
+}
+
+/**
+ * pm_genpd_default_thaw_early - Default "early device thaw" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_thaw_early(struct device *dev)
+{
+	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
+
+	return cb ? cb(dev) : pm_generic_thaw_noirq(dev);
+}
+
+/**
+ * pm_genpd_default_thaw - Default "device thaw" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_thaw(struct device *dev)
+{
+	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;
+
+	return cb ? cb(dev) : pm_generic_thaw(dev);
+}
+
+/**
  * pm_genpd_init - Initialize a generic I/O PM domain object.
  * @genpd: PM domain object to initialize.
  * @gov: PM domain governor to associate with the domain (may be NULL).
@@ -1305,6 +1543,7 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
 	genpd->resume_count = 0;
 	genpd->device_count = 0;
 	genpd->suspended_count = 0;
+	genpd->max_off_time_ns = -1;
 	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
 	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
 	genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
@@ -1317,11 +1556,21 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
 	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
 	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
 	genpd->domain.ops.thaw = pm_genpd_thaw;
-	genpd->domain.ops.poweroff = pm_genpd_dev_poweroff;
-	genpd->domain.ops.poweroff_noirq = pm_genpd_dev_poweroff_noirq;
+	genpd->domain.ops.poweroff = pm_genpd_suspend;
+	genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
 	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
-	genpd->domain.ops.restore = pm_genpd_restore;
+	genpd->domain.ops.restore = pm_genpd_resume;
 	genpd->domain.ops.complete = pm_genpd_complete;
+	genpd->dev_ops.save_state = pm_genpd_default_save_state;
+	genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
+	genpd->dev_ops.suspend = pm_genpd_default_suspend;
+	genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late;
+	genpd->dev_ops.resume_early = pm_genpd_default_resume_early;
+	genpd->dev_ops.resume = pm_genpd_default_resume;
+	genpd->dev_ops.freeze = pm_genpd_default_freeze;
+	genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late;
+	genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early;
+	genpd->dev_ops.thaw = pm_genpd_default_thaw;
 	mutex_lock(&gpd_list_lock);
 	list_add(&genpd->gpd_list_node, &gpd_list);
 	mutex_unlock(&gpd_list_lock);
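
With the domain.c changes above, a platform registers each device together with optional timing data, and the default dev_ops installed by pm_genpd_init() fall back to the driver's runtime PM and generic system-sleep callbacks whenever no per-device callback was set. A rough sketch against the new __pm_genpd_add_device() signature; the my_* names and latency values are illustrative only:

#include <linux/pm_domain.h>

/* Platform-level domain object, set up elsewhere with pm_genpd_init(). */
static struct generic_pm_domain my_pd;

/* Stop/start latencies known up front for this device (illustrative). */
static struct gpd_timing_data my_dev_td = {
        .stop_latency_ns  = 20000,
        .start_latency_ns = 50000,
};

static int my_platform_add_device(struct device *dev)
{
        /*
         * Register the device with its timing data.  Passing NULL for td
         * keeps zero-initialized defaults, which GENPD_DEV_TIMED_CALLBACK()
         * then raises automatically whenever a measured latency exceeds the
         * stored value.
         */
        return __pm_genpd_add_device(&my_pd, dev, &my_dev_td);
}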
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
new file mode 100644
index 000000000000..51527ee92d10
--- /dev/null
+++ b/drivers/base/power/domain_governor.c
@@ -0,0 +1,156 @@
+/*
+ * drivers/base/power/domain_governor.c - Governors for device PM domains.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_qos.h>
+#include <linux/hrtimer.h>
+
+/**
+ * default_stop_ok - Default PM domain governor routine for stopping devices.
+ * @dev: Device to check.
+ */
+bool default_stop_ok(struct device *dev)
+{
+	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	if (dev->power.max_time_suspended_ns < 0 || td->break_even_ns == 0)
+		return true;
+
+	return td->stop_latency_ns + td->start_latency_ns < td->break_even_ns
+		&& td->break_even_ns < dev->power.max_time_suspended_ns;
+}
+
+/**
+ * default_power_down_ok - Default generic PM domain power off governor routine.
+ * @pd: PM domain to check.
+ *
+ * This routine must be executed under the PM domain's lock.
+ */
+static bool default_power_down_ok(struct dev_pm_domain *pd)
+{
+	struct generic_pm_domain *genpd = pd_to_genpd(pd);
+	struct gpd_link *link;
+	struct pm_domain_data *pdd;
+	s64 min_dev_off_time_ns;
+	s64 off_on_time_ns;
+	ktime_t time_now = ktime_get();
+
+	off_on_time_ns = genpd->power_off_latency_ns +
+		genpd->power_on_latency_ns;
+	/*
+	 * It doesn't make sense to remove power from the domain if saving
+	 * the state of all devices in it and the power off/power on operations
+	 * take too much time.
+	 *
+	 * All devices in this domain have been stopped already at this point.
+	 */
+	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+		if (pdd->dev->driver)
+			off_on_time_ns +=
+				to_gpd_data(pdd)->td.save_state_latency_ns;
+	}
+
+	/*
+	 * Check if subdomains can be off for enough time.
+	 *
+	 * All subdomains have been powered off already at this point.
+	 */
+	list_for_each_entry(link, &genpd->master_links, master_node) {
+		struct generic_pm_domain *sd = link->slave;
+		s64 sd_max_off_ns = sd->max_off_time_ns;
+
+		if (sd_max_off_ns < 0)
+			continue;
+
+		sd_max_off_ns -= ktime_to_ns(ktime_sub(time_now,
+						       sd->power_off_time));
+		/*
+		 * Check if the subdomain is allowed to be off long enough for
+		 * the current domain to turn off and on (that's how much time
+		 * it will have to wait worst case).
+		 */
+		if (sd_max_off_ns <= off_on_time_ns)
+			return false;
+	}
+
+	/*
+	 * Check if the devices in the domain can be off enough time.
+	 */
+	min_dev_off_time_ns = -1;
+	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+		struct gpd_timing_data *td;
+		struct device *dev = pdd->dev;
+		s64 dev_off_time_ns;
+
+		if (!dev->driver || dev->power.max_time_suspended_ns < 0)
+			continue;
+
+		td = &to_gpd_data(pdd)->td;
+		dev_off_time_ns = dev->power.max_time_suspended_ns -
+			(td->start_latency_ns + td->restore_state_latency_ns +
+				ktime_to_ns(ktime_sub(time_now,
+						      dev->power.suspend_time)));
+		if (dev_off_time_ns <= off_on_time_ns)
+			return false;
+
+		if (min_dev_off_time_ns > dev_off_time_ns
+		    || min_dev_off_time_ns < 0)
+			min_dev_off_time_ns = dev_off_time_ns;
+	}
+
+	if (min_dev_off_time_ns < 0) {
+		/*
+		 * There are no latency constraints, so the domain can spend
+		 * arbitrary time in the "off" state.
+		 */
+		genpd->max_off_time_ns = -1;
+		return true;
+	}
+
+	/*
+	 * The difference between the computed minimum delta and the time needed
+	 * to turn the domain on is the maximum theoretical time this domain can
+	 * spend in the "off" state.
+	 */
+	min_dev_off_time_ns -= genpd->power_on_latency_ns;
+
+	/*
+	 * If the difference between the computed minimum delta and the time
+	 * needed to turn the domain off and back on is smaller than the
+	 * domain's power break even time, removing power from the domain is not
+	 * worth it.
+	 */
+	if (genpd->break_even_ns >
+	    min_dev_off_time_ns - genpd->power_off_latency_ns)
+		return false;
+
+	genpd->max_off_time_ns = min_dev_off_time_ns;
+	return true;
+}
+
+struct dev_power_governor simple_qos_governor = {
+	.stop_ok = default_stop_ok,
+	.power_down_ok = default_power_down_ok,
+};
+
+static bool always_on_power_down_ok(struct dev_pm_domain *domain)
+{
+	return false;
+}
+
+/**
+ * pm_genpd_gov_always_on - A governor implementing an always-on policy.
+ */
+struct dev_power_governor pm_domain_always_on_gov = {
+	.power_down_ok = always_on_power_down_ok,
+	.stop_ok = default_stop_ok,
+};
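
In short: default_stop_ok() only lets a device be stopped when its stop plus start latency fits under its break-even time and the QoS-derived maximum suspend time, and default_power_down_ok() applies the analogous check to the whole domain and its subdomains. A platform opts in by passing one of the exported governors to pm_genpd_init(); the sketch below assumes the usual three-argument pm_genpd_init(genpd, gov, is_off) form and the extern declarations in linux/pm_domain.h, neither of which is part of this diff:

#include <linux/pm_domain.h>

static int my_pd_power_on(struct generic_pm_domain *genpd)
{
        /* Platform-specific: switch the domain's power rail on. */
        return 0;
}

static int my_pd_power_off(struct generic_pm_domain *genpd)
{
        /* Platform-specific: switch the domain's power rail off. */
        return 0;
}

static struct generic_pm_domain my_pd = {
        .name      = "my-domain",       /* shows up in the new latency warnings */
        .power_on  = my_pd_power_on,
        .power_off = my_pd_power_off,
};

static void my_platform_init_power_domains(void)
{
        /* Let the QoS-aware governor decide when powering off pays off. */
        pm_genpd_init(&my_pd, &simple_qos_governor, false);

        /* A domain that must never be powered down at run time would use: */
        /* pm_genpd_init(&other_pd, &pm_domain_always_on_gov, false); */
}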
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 03f4bd069ca8..c5d358837461 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -420,3 +420,28 @@ int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier)
 	return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier);
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
+
+/**
+ * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
+ * @dev: Device whose ancestor to add the request for.
+ * @req: Pointer to the preallocated handle.
+ * @value: Constraint latency value.
+ */
+int dev_pm_qos_add_ancestor_request(struct device *dev,
+				    struct dev_pm_qos_request *req, s32 value)
+{
+	struct device *ancestor = dev->parent;
+	int error = -ENODEV;
+
+	while (ancestor && !ancestor->power.ignore_children)
+		ancestor = ancestor->parent;
+
+	if (ancestor)
+		error = dev_pm_qos_add_request(ancestor, req, value);
+
+	if (error)
+		req->dev = NULL;
+
+	return error;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
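
The st1232 patch in this series is the first user of the new helper: rather than constraining the touchscreen device itself, it places the request on the first ancestor that has power.ignore_children set (typically the bus controller), which is exactly what the loop above walks up to. A hedged sketch of that calling pattern; the foo_ts names and the value of 100 are illustrative, and it assumes dev_pm_qos_add_request() fills in req->dev on success so the handle's dev field can double as an "is this request active" flag:

#include <linux/i2c.h>
#include <linux/pm_qos.h>

struct foo_ts {
        struct i2c_client *client;
        struct dev_pm_qos_request low_latency_req;      /* preallocated handle */
};

static void foo_ts_report_touch(struct foo_ts *ts)
{
        /*
         * While touches are streaming in, constrain the nearest ancestor
         * that has power.ignore_children set (usually the bus controller).
         * On failure the helper clears req->dev, so the field stays NULL
         * until a request has actually been added.
         */
        if (!ts->low_latency_req.dev)
                dev_pm_qos_add_ancestor_request(&ts->client->dev,
                                                &ts->low_latency_req, 100);
}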