aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorUlf Hansson <ulf.hansson@linaro.org>2017-02-08 07:39:00 -0500
committerRafael J. Wysocki <rafael.j.wysocki@intel.com>2017-02-08 19:01:26 -0500
commit0883ac038be12c4dba1a68a05030730f10442bc2 (patch)
tree4c1a38301b35bc50454aaff6f8c61d1174b94fde
parentf3c826ac26766f82769319db68f5b4337d6efc24 (diff)
PM / Domains: Fix asynchronous execution of *noirq() callbacks
As the PM core may invoke the *noirq() callbacks asynchronously, the current lock-less approach in genpd doesn't work. The consequence is that we may find concurrent operations racing to power on/off the PM domain. As of now, no immediate errors have been reported, but it's probably only a matter of time. Therefore, let's fix the problem now before this becomes a real issue, by deploying the locking scheme to the relevant functions. Reported-by: Brian Norris <briannorris@chromium.org> Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org> Tested-by: Geert Uytterhoeven <geert+renesas@glider.be> Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
-rw-r--r--drivers/base/power/domain.c68
1 file changed, 39 insertions, 29 deletions
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index b04c14f2b440..3a75fb1b4126 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -729,16 +729,18 @@ static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
729/** 729/**
730 * genpd_sync_power_off - Synchronously power off a PM domain and its masters. 730 * genpd_sync_power_off - Synchronously power off a PM domain and its masters.
731 * @genpd: PM domain to power off, if possible. 731 * @genpd: PM domain to power off, if possible.
732 * @use_lock: use the lock.
733 * @depth: nesting count for lockdep.
732 * 734 *
733 * Check if the given PM domain can be powered off (during system suspend or 735 * Check if the given PM domain can be powered off (during system suspend or
734 * hibernation) and do that if so. Also, in that case propagate to its masters. 736 * hibernation) and do that if so. Also, in that case propagate to its masters.
735 * 737 *
736 * This function is only called in "noirq" and "syscore" stages of system power 738 * This function is only called in "noirq" and "syscore" stages of system power
737 * transitions, so it need not acquire locks (all of the "noirq" callbacks are 739 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
738 * executed sequentially, so it is guaranteed that it will never run twice in 740 * these cases the lock must be held.
739 * parallel).
740 */ 741 */
741static void genpd_sync_power_off(struct generic_pm_domain *genpd) 742static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
743 unsigned int depth)
742{ 744{
743 struct gpd_link *link; 745 struct gpd_link *link;
744 746
@@ -757,20 +759,29 @@ static void genpd_sync_power_off(struct generic_pm_domain *genpd)
757 759
758 list_for_each_entry(link, &genpd->slave_links, slave_node) { 760 list_for_each_entry(link, &genpd->slave_links, slave_node) {
759 genpd_sd_counter_dec(link->master); 761 genpd_sd_counter_dec(link->master);
760 genpd_sync_power_off(link->master); 762
763 if (use_lock)
764 genpd_lock_nested(link->master, depth + 1);
765
766 genpd_sync_power_off(link->master, use_lock, depth + 1);
767
768 if (use_lock)
769 genpd_unlock(link->master);
761 } 770 }
762} 771}
763 772
764/** 773/**
765 * genpd_sync_power_on - Synchronously power on a PM domain and its masters. 774 * genpd_sync_power_on - Synchronously power on a PM domain and its masters.
766 * @genpd: PM domain to power on. 775 * @genpd: PM domain to power on.
776 * @use_lock: use the lock.
777 * @depth: nesting count for lockdep.
767 * 778 *
768 * This function is only called in "noirq" and "syscore" stages of system power 779 * This function is only called in "noirq" and "syscore" stages of system power
769 * transitions, so it need not acquire locks (all of the "noirq" callbacks are 780 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
770 * executed sequentially, so it is guaranteed that it will never run twice in 781 * these cases the lock must be held.
771 * parallel).
772 */ 782 */
773static void genpd_sync_power_on(struct generic_pm_domain *genpd) 783static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
784 unsigned int depth)
774{ 785{
775 struct gpd_link *link; 786 struct gpd_link *link;
776 787
@@ -778,8 +789,15 @@ static void genpd_sync_power_on(struct generic_pm_domain *genpd)
778 return; 789 return;
779 790
780 list_for_each_entry(link, &genpd->slave_links, slave_node) { 791 list_for_each_entry(link, &genpd->slave_links, slave_node) {
781 genpd_sync_power_on(link->master);
782 genpd_sd_counter_inc(link->master); 792 genpd_sd_counter_inc(link->master);
793
794 if (use_lock)
795 genpd_lock_nested(link->master, depth + 1);
796
797 genpd_sync_power_on(link->master, use_lock, depth + 1);
798
799 if (use_lock)
800 genpd_unlock(link->master);
783 } 801 }
784 802
785 _genpd_power_on(genpd, false); 803 _genpd_power_on(genpd, false);
@@ -888,13 +906,10 @@ static int pm_genpd_suspend_noirq(struct device *dev)
888 return ret; 906 return ret;
889 } 907 }
890 908
891 /* 909 genpd_lock(genpd);
892 * Since all of the "noirq" callbacks are executed sequentially, it is
893 * guaranteed that this function will never run twice in parallel for
894 * the same PM domain, so it is not necessary to use locking here.
895 */
896 genpd->suspended_count++; 910 genpd->suspended_count++;
897 genpd_sync_power_off(genpd); 911 genpd_sync_power_off(genpd, true, 0);
912 genpd_unlock(genpd);
898 913
899 return 0; 914 return 0;
900} 915}
@@ -919,13 +934,10 @@ static int pm_genpd_resume_noirq(struct device *dev)
919 if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)) 934 if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
920 return 0; 935 return 0;
921 936
922 /* 937 genpd_lock(genpd);
923 * Since all of the "noirq" callbacks are executed sequentially, it is 938 genpd_sync_power_on(genpd, true, 0);
924 * guaranteed that this function will never run twice in parallel for
925 * the same PM domain, so it is not necessary to use locking here.
926 */
927 genpd_sync_power_on(genpd);
928 genpd->suspended_count--; 939 genpd->suspended_count--;
940 genpd_unlock(genpd);
929 941
930 if (genpd->dev_ops.stop && genpd->dev_ops.start) 942 if (genpd->dev_ops.stop && genpd->dev_ops.start)
931 ret = pm_runtime_force_resume(dev); 943 ret = pm_runtime_force_resume(dev);
@@ -1002,13 +1014,10 @@ static int pm_genpd_restore_noirq(struct device *dev)
1002 return -EINVAL; 1014 return -EINVAL;
1003 1015
1004 /* 1016 /*
1005 * Since all of the "noirq" callbacks are executed sequentially, it is
1006 * guaranteed that this function will never run twice in parallel for
1007 * the same PM domain, so it is not necessary to use locking here.
1008 *
1009 * At this point suspended_count == 0 means we are being run for the 1017 * At this point suspended_count == 0 means we are being run for the
1010 * first time for the given domain in the present cycle. 1018 * first time for the given domain in the present cycle.
1011 */ 1019 */
1020 genpd_lock(genpd);
1012 if (genpd->suspended_count++ == 0) 1021 if (genpd->suspended_count++ == 0)
1013 /* 1022 /*
1014 * The boot kernel might put the domain into arbitrary state, 1023 * The boot kernel might put the domain into arbitrary state,
@@ -1017,7 +1026,8 @@ static int pm_genpd_restore_noirq(struct device *dev)
1017 */ 1026 */
1018 genpd->status = GPD_STATE_POWER_OFF; 1027 genpd->status = GPD_STATE_POWER_OFF;
1019 1028
1020 genpd_sync_power_on(genpd); 1029 genpd_sync_power_on(genpd, true, 0);
1030 genpd_unlock(genpd);
1021 1031
1022 if (genpd->dev_ops.stop && genpd->dev_ops.start) 1032 if (genpd->dev_ops.stop && genpd->dev_ops.start)
1023 ret = pm_runtime_force_resume(dev); 1033 ret = pm_runtime_force_resume(dev);
@@ -1072,9 +1082,9 @@ static void genpd_syscore_switch(struct device *dev, bool suspend)
1072 1082
1073 if (suspend) { 1083 if (suspend) {
1074 genpd->suspended_count++; 1084 genpd->suspended_count++;
1075 genpd_sync_power_off(genpd); 1085 genpd_sync_power_off(genpd, false, 0);
1076 } else { 1086 } else {
1077 genpd_sync_power_on(genpd); 1087 genpd_sync_power_on(genpd, false, 0);
1078 genpd->suspended_count--; 1088 genpd->suspended_count--;
1079 } 1089 }
1080} 1090}