summaryrefslogtreecommitdiffstats
path: root/drivers/base
diff options
context:
space:
mode:
authorUlf Hansson <ulf.hansson@linaro.org>2016-05-30 05:33:13 -0400
committerRafael J. Wysocki <rafael.j.wysocki@intel.com>2016-06-16 09:14:36 -0400
commit4d23a5e84806b202d9231929c9507ef7cf7a0185 (patch)
tree5575c3bdbd0cdcaf743203117340aa621d89f729 /drivers/base
parent9f5b52747dbf83816dcd29ea1700813aeb668c0f (diff)
PM / Domains: Allow runtime PM during system PM phases
In cases when a PM domain isn't powered off when genpd's ->prepare() callback is invoked, genpd runtime resumes and disables runtime PM for the device. This behaviour was needed when genpd managed intermediate states during the power off sequence, so as to maintain proper low power states of devices during system PM suspend/resume. Commit ba2bbfbf6307 (PM / Domains: Remove intermediate states from the power off sequence) enables genpd to improve its behaviour in that respect. The PM core disables runtime PM at __device_suspend_late() before it calls a system PM "late" callback for a device. When resuming a device, after a corresponding "early" callback has been invoked, the PM core re-enables runtime PM. By changing genpd to allow runtime PM according to the same system PM phases as the PM core, devices can be runtime resumed by their corresponding subsystem/driver when really needed. In this way, genpd no longer needs to runtime resume the device from its ->prepare() callback. In most cases that avoids unnecessary and energy- wasting operations of runtime resuming devices that have nothing to do, only to runtime suspend them shortly after. However, because of this change of behaviour in genpd, and because genpd powers on the PM domain unconditionally in the system PM resume "noirq" phase, a PM domain could potentially stay powered on even if it's unused after the system has resumed. To avoid this, schedule a power off work when genpd's system PM ->complete() callback has been invoked for the last device in the PM domain. Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org> Reviewed-by: Kevin Hilman <khilman@baylibre.com> Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Diffstat (limited to 'drivers/base')
-rw-r--r--drivers/base/power/domain.c34
1 file changed, 8 insertions, 26 deletions
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 60a9971fc474..4cb57f3f0ee3 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -739,21 +739,6 @@ static int pm_genpd_prepare(struct device *dev)
739 739
740 mutex_unlock(&genpd->lock); 740 mutex_unlock(&genpd->lock);
741 741
742 /*
743 * Even if the PM domain is powered off at this point, we can't expect
744 * it to remain in that state during the entire system PM suspend
745 * phase. Any subsystem/driver for a device in the PM domain, may still
746 * need to serve a request which may require the device to be runtime
747 * resumed and its PM domain to be powered.
748 *
749 * As we are disabling runtime PM at this point, we are preventing the
750 * subsystem/driver to decide themselves. For that reason, we need to
751 * make sure the device is operational as it may be required in some
752 * cases.
753 */
754 pm_runtime_resume(dev);
755 __pm_runtime_disable(dev, false);
756
757 ret = pm_generic_prepare(dev); 742 ret = pm_generic_prepare(dev);
758 if (ret) { 743 if (ret) {
759 mutex_lock(&genpd->lock); 744 mutex_lock(&genpd->lock);
@@ -761,7 +746,6 @@ static int pm_genpd_prepare(struct device *dev)
761 genpd->prepared_count--; 746 genpd->prepared_count--;
762 747
763 mutex_unlock(&genpd->lock); 748 mutex_unlock(&genpd->lock);
764 pm_runtime_enable(dev);
765 } 749 }
766 750
767 return ret; 751 return ret;
@@ -787,8 +771,6 @@ static int pm_genpd_suspend_noirq(struct device *dev)
787 if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)) 771 if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
788 return 0; 772 return 0;
789 773
790 genpd_stop_dev(genpd, dev);
791
792 /* 774 /*
793 * Since all of the "noirq" callbacks are executed sequentially, it is 775 * Since all of the "noirq" callbacks are executed sequentially, it is
794 * guaranteed that this function will never run twice in parallel for 776 * guaranteed that this function will never run twice in parallel for
@@ -827,7 +809,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
827 pm_genpd_sync_poweron(genpd, true); 809 pm_genpd_sync_poweron(genpd, true);
828 genpd->suspended_count--; 810 genpd->suspended_count--;
829 811
830 return genpd_start_dev(genpd, dev); 812 return 0;
831} 813}
832 814
833/** 815/**
@@ -849,7 +831,7 @@ static int pm_genpd_freeze_noirq(struct device *dev)
849 if (IS_ERR(genpd)) 831 if (IS_ERR(genpd))
850 return -EINVAL; 832 return -EINVAL;
851 833
852 return genpd_stop_dev(genpd, dev); 834 return 0;
853} 835}
854 836
855/** 837/**
@@ -869,7 +851,7 @@ static int pm_genpd_thaw_noirq(struct device *dev)
869 if (IS_ERR(genpd)) 851 if (IS_ERR(genpd))
870 return -EINVAL; 852 return -EINVAL;
871 853
872 return genpd_start_dev(genpd, dev); 854 return 0;
873} 855}
874 856
875/** 857/**
@@ -907,7 +889,7 @@ static int pm_genpd_restore_noirq(struct device *dev)
907 889
908 pm_genpd_sync_poweron(genpd, true); 890 pm_genpd_sync_poweron(genpd, true);
909 891
910 return genpd_start_dev(genpd, dev); 892 return 0;
911} 893}
912 894
913/** 895/**
@@ -929,15 +911,15 @@ static void pm_genpd_complete(struct device *dev)
929 if (IS_ERR(genpd)) 911 if (IS_ERR(genpd))
930 return; 912 return;
931 913
914 pm_generic_complete(dev);
915
932 mutex_lock(&genpd->lock); 916 mutex_lock(&genpd->lock);
933 917
934 genpd->prepared_count--; 918 genpd->prepared_count--;
919 if (!genpd->prepared_count)
920 genpd_queue_power_off_work(genpd);
935 921
936 mutex_unlock(&genpd->lock); 922 mutex_unlock(&genpd->lock);
937
938 pm_generic_complete(dev);
939 pm_runtime_set_active(dev);
940 pm_runtime_enable(dev);
941} 923}
942 924
943/** 925/**