author		Linus Torvalds <torvalds@linux-foundation.org>	2012-03-21 13:15:51 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-21 13:15:51 -0400
commit		c7c66c0cb0c77b1a8edf09bca57d922312d58030 (patch)
tree		77277103c5f16aa4dee64978a060933d92e14776 /drivers
parent		9f3938346a5c1fa504647670edb5fea5756cfb00 (diff)
parent		98e8bdafeb4728a6af7bbcbcc3984967d1cf2bc1 (diff)
Merge tag 'pm-for-3.4' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull power management updates for 3.4 from Rafael Wysocki:
 "Assorted extensions and fixes including:

  * Introduction of early/late suspend/hibernation device callbacks.
  * Generic PM domains extensions and fixes.
  * devfreq updates from Axel Lin and MyungJoo Ham.
  * Device PM QoS updates.
  * Fixes of concurrency problems with wakeup sources.
  * System suspend and hibernation fixes."

* tag 'pm-for-3.4' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (43 commits)
  PM / Domains: Check domain status during hibernation restore of devices
  PM / devfreq: add relation of recommended frequency.
  PM / shmobile: Make MTU2 driver use pm_genpd_dev_always_on()
  PM / shmobile: Make CMT driver use pm_genpd_dev_always_on()
  PM / shmobile: Make TMU driver use pm_genpd_dev_always_on()
  PM / Domains: Introduce "always on" device flag
  PM / Domains: Fix hibernation restore of devices, v2
  PM / Domains: Fix handling of wakeup devices during system resume
  sh_mmcif / PM: Use PM QoS latency constraint
  tmio_mmc / PM: Use PM QoS latency constraint
  PM / QoS: Make it possible to expose PM QoS latency constraints
  PM / Sleep: JBD and JBD2 missing set_freezable()
  PM / Domains: Fix include for PM_GENERIC_DOMAINS=n case
  PM / Freezer: Remove references to TIF_FREEZE in comments
  PM / Sleep: Add more wakeup source initialization routines
  PM / Hibernate: Enable usermodehelpers in hibernate() error path
  PM / Sleep: Make __pm_stay_awake() delete wakeup source timers
  PM / Sleep: Fix race conditions related to wakeup source timer function
  PM / Sleep: Fix possible infinite loop during wakeup source destruction
  PM / Hibernate: print physical addresses consistently with other parts of kernel
  ...
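The first bullet above covers the new suspend_late/resume_early (and freeze_late/thaw_early, poweroff_late/restore_early) phases added to struct dev_pm_ops by this merge, slotted between the regular and "noirq" phases. As a hedged illustration only (the foo_* names are hypothetical, not from this merge), a driver would hook them like so:

static int foo_suspend_late(struct device *dev)
{
        /* Runs after all regular ->suspend callbacks, before the noirq phase. */
        return 0;
}

static int foo_resume_early(struct device *dev)
{
        /* Runs after the noirq resume phase, before regular ->resume. */
        return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
        .suspend_late   = foo_suspend_late,
        .resume_early   = foo_resume_early,
};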
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/base/power/domain.c               | 253
-rw-r--r--  drivers/base/power/generic_ops.c          | 157
-rw-r--r--  drivers/base/power/main.c                 | 247
-rw-r--r--  drivers/base/power/power.h                |   4
-rw-r--r--  drivers/base/power/qos.c                  |  61
-rw-r--r--  drivers/base/power/sysfs.c                |  47
-rw-r--r--  drivers/base/power/wakeup.c               |  85
-rw-r--r--  drivers/clocksource/sh_cmt.c              |   4
-rw-r--r--  drivers/clocksource/sh_mtu2.c             |   4
-rw-r--r--  drivers/clocksource/sh_tmu.c              |   4
-rw-r--r--  drivers/devfreq/devfreq.c                 | 112
-rw-r--r--  drivers/devfreq/exynos4_bus.c             |  23
-rw-r--r--  drivers/devfreq/governor_performance.c    |   5
-rw-r--r--  drivers/devfreq/governor_powersave.c      |   2
-rw-r--r--  drivers/devfreq/governor_simpleondemand.c |  12
-rw-r--r--  drivers/devfreq/governor_userspace.c      |  15
-rw-r--r--  drivers/mmc/host/sh_mmcif.c               |   5
-rw-r--r--  drivers/mmc/host/tmio_mmc_pio.c           |   5
-rw-r--r--  drivers/xen/manage.c                      |   6
19 files changed, 869 insertions(+), 182 deletions(-)
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 978bbf7ac6af..73ce9fbe9839 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -366,7 +366,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 	not_suspended = 0;
 	list_for_each_entry(pdd, &genpd->dev_list, list_node)
 		if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
-		    || pdd->dev->power.irq_safe))
+		    || pdd->dev->power.irq_safe || to_gpd_data(pdd)->always_on))
 			not_suspended++;
 
 	if (not_suspended > genpd->in_progress)
@@ -503,6 +503,9 @@ static int pm_genpd_runtime_suspend(struct device *dev)
 
 	might_sleep_if(!genpd->dev_irq_safe);
 
+	if (dev_gpd_data(dev)->always_on)
+		return -EBUSY;
+
 	stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
 	if (stop_ok && !stop_ok(dev))
 		return -EBUSY;
@@ -764,8 +767,10 @@ static int pm_genpd_prepare(struct device *dev)
 
 	genpd_acquire_lock(genpd);
 
-	if (genpd->prepared_count++ == 0)
+	if (genpd->prepared_count++ == 0) {
+		genpd->suspended_count = 0;
 		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
+	}
 
 	genpd_release_lock(genpd);
 
@@ -820,17 +825,16 @@ static int pm_genpd_suspend(struct device *dev)
 }
 
 /**
- * pm_genpd_suspend_noirq - Late suspend of a device from an I/O PM domain.
+ * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
  * @dev: Device to suspend.
  *
  * Carry out a late suspend of a device under the assumption that its
  * pm_domain field points to the domain member of an object of type
  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
  */
-static int pm_genpd_suspend_noirq(struct device *dev)
+static int pm_genpd_suspend_late(struct device *dev)
 {
 	struct generic_pm_domain *genpd;
-	int ret;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
@@ -838,14 +842,28 @@ static int pm_genpd_suspend_noirq(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	if (genpd->suspend_power_off)
-		return 0;
+	return genpd->suspend_power_off ? 0 : genpd_suspend_late(genpd, dev);
+}
 
-	ret = genpd_suspend_late(genpd, dev);
-	if (ret)
-		return ret;
+/**
+ * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
+ * @dev: Device to suspend.
+ *
+ * Stop the device and remove power from the domain if all devices in it have
+ * been stopped.
+ */
+static int pm_genpd_suspend_noirq(struct device *dev)
+{
+	struct generic_pm_domain *genpd;
 
-	if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
+	dev_dbg(dev, "%s()\n", __func__);
+
+	genpd = dev_to_genpd(dev);
+	if (IS_ERR(genpd))
+		return -EINVAL;
+
+	if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
+	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
 		return 0;
 
 	genpd_stop_dev(genpd, dev);
@@ -862,13 +880,10 @@ static int pm_genpd_suspend_noirq(struct device *dev)
 }
 
 /**
- * pm_genpd_resume_noirq - Early resume of a device from an I/O power domain.
+ * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
  * @dev: Device to resume.
  *
- * Carry out an early resume of a device under the assumption that its
- * pm_domain field points to the domain member of an object of type
- * struct generic_pm_domain representing a power domain consisting of I/O
- * devices.
+ * Restore power to the device's PM domain, if necessary, and start the device.
  */
 static int pm_genpd_resume_noirq(struct device *dev)
 {
@@ -880,7 +895,8 @@ static int pm_genpd_resume_noirq(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	if (genpd->suspend_power_off)
+	if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
+	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
 		return 0;
 
 	/*
@@ -890,13 +906,34 @@ static int pm_genpd_resume_noirq(struct device *dev)
 	 */
 	pm_genpd_poweron(genpd);
 	genpd->suspended_count--;
-	genpd_start_dev(genpd, dev);
 
-	return genpd_resume_early(genpd, dev);
+	return genpd_start_dev(genpd, dev);
 }
 
 /**
- * pm_genpd_resume - Resume a device belonging to an I/O power domain.
+ * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
+ * @dev: Device to resume.
+ *
+ * Carry out an early resume of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a power domain consisting of I/O
+ * devices.
+ */
+static int pm_genpd_resume_early(struct device *dev)
+{
+	struct generic_pm_domain *genpd;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	genpd = dev_to_genpd(dev);
+	if (IS_ERR(genpd))
+		return -EINVAL;
+
+	return genpd->suspend_power_off ? 0 : genpd_resume_early(genpd, dev);
+}
+
+/**
+ * pm_genpd_resume - Resume of device in an I/O PM domain.
  * @dev: Device to resume.
  *
  * Resume a device under the assumption that its pm_domain field points to the
@@ -917,7 +954,7 @@ static int pm_genpd_resume(struct device *dev)
 }
 
 /**
- * pm_genpd_freeze - Freeze a device belonging to an I/O power domain.
+ * pm_genpd_freeze - Freezing a device in an I/O PM domain.
  * @dev: Device to freeze.
 *
 * Freeze a device under the assumption that its pm_domain field points to the
@@ -938,7 +975,29 @@ static int pm_genpd_freeze(struct device *dev)
 }
 
 /**
- * pm_genpd_freeze_noirq - Late freeze of a device from an I/O power domain.
+ * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
+ * @dev: Device to freeze.
+ *
+ * Carry out a late freeze of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a power domain consisting of I/O
+ * devices.
+ */
+static int pm_genpd_freeze_late(struct device *dev)
+{
+	struct generic_pm_domain *genpd;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	genpd = dev_to_genpd(dev);
+	if (IS_ERR(genpd))
+		return -EINVAL;
+
+	return genpd->suspend_power_off ? 0 : genpd_freeze_late(genpd, dev);
+}
+
+/**
+ * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
  * @dev: Device to freeze.
  *
  * Carry out a late freeze of a device under the assumption that its
@@ -949,7 +1008,6 @@ static int pm_genpd_freeze(struct device *dev)
 static int pm_genpd_freeze_noirq(struct device *dev)
 {
 	struct generic_pm_domain *genpd;
-	int ret;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
@@ -957,20 +1015,33 @@ static int pm_genpd_freeze_noirq(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	if (genpd->suspend_power_off)
-		return 0;
+	return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
+		0 : genpd_stop_dev(genpd, dev);
+}
 
-	ret = genpd_freeze_late(genpd, dev);
-	if (ret)
-		return ret;
+/**
+ * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
+ * @dev: Device to thaw.
+ *
+ * Start the device, unless power has been removed from the domain already
+ * before the system transition.
+ */
+static int pm_genpd_thaw_noirq(struct device *dev)
+{
+	struct generic_pm_domain *genpd;
 
-	genpd_stop_dev(genpd, dev);
+	dev_dbg(dev, "%s()\n", __func__);
 
-	return 0;
+	genpd = dev_to_genpd(dev);
+	if (IS_ERR(genpd))
+		return -EINVAL;
+
+	return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
+		0 : genpd_start_dev(genpd, dev);
 }
 
 /**
- * pm_genpd_thaw_noirq - Early thaw of a device from an I/O power domain.
+ * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
  * @dev: Device to thaw.
  *
 * Carry out an early thaw of a device under the assumption that its
@@ -978,7 +1049,7 @@ static int pm_genpd_freeze_noirq(struct device *dev)
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
-static int pm_genpd_thaw_noirq(struct device *dev)
+static int pm_genpd_thaw_early(struct device *dev)
 {
 	struct generic_pm_domain *genpd;
 
@@ -988,12 +1059,7 @@ static int pm_genpd_thaw_noirq(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	if (genpd->suspend_power_off)
-		return 0;
-
-	genpd_start_dev(genpd, dev);
-
-	return genpd_thaw_early(genpd, dev);
+	return genpd->suspend_power_off ? 0 : genpd_thaw_early(genpd, dev);
 }
 
 /**
@@ -1018,13 +1084,11 @@ static int pm_genpd_thaw(struct device *dev)
 }
 
 /**
- * pm_genpd_restore_noirq - Early restore of a device from an I/O power domain.
+ * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
  * @dev: Device to resume.
  *
- * Carry out an early restore of a device under the assumption that its
- * pm_domain field points to the domain member of an object of type
- * struct generic_pm_domain representing a power domain consisting of I/O
- * devices.
+ * Make sure the domain will be in the same power state as before the
+ * hibernation the system is resuming from and start the device if necessary.
  */
 static int pm_genpd_restore_noirq(struct device *dev)
 {
@@ -1040,23 +1104,35 @@ static int pm_genpd_restore_noirq(struct device *dev)
 	 * Since all of the "noirq" callbacks are executed sequentially, it is
 	 * guaranteed that this function will never run twice in parallel for
 	 * the same PM domain, so it is not necessary to use locking here.
+	 *
+	 * At this point suspended_count == 0 means we are being run for the
+	 * first time for the given domain in the present cycle.
 	 */
-	genpd->status = GPD_STATE_POWER_OFF;
-	if (genpd->suspend_power_off) {
+	if (genpd->suspended_count++ == 0) {
 		/*
-		 * The boot kernel might put the domain into the power on state,
-		 * so make sure it really is powered off.
+		 * The boot kernel might put the domain into arbitrary state,
+		 * so make it appear as powered off to pm_genpd_poweron(), so
+		 * that it tries to power it on in case it was really off.
 		 */
-		if (genpd->power_off)
-			genpd->power_off(genpd);
-		return 0;
+		genpd->status = GPD_STATE_POWER_OFF;
+		if (genpd->suspend_power_off) {
+			/*
+			 * If the domain was off before the hibernation, make
+			 * sure it will be off going forward.
+			 */
+			if (genpd->power_off)
+				genpd->power_off(genpd);
+
+			return 0;
+		}
 	}
 
+	if (genpd->suspend_power_off)
+		return 0;
+
 	pm_genpd_poweron(genpd);
-	genpd->suspended_count--;
-	genpd_start_dev(genpd, dev);
 
-	return genpd_resume_early(genpd, dev);
+	return dev_gpd_data(dev)->always_on ? 0 : genpd_start_dev(genpd, dev);
 }
 
 /**
@@ -1099,11 +1175,15 @@ static void pm_genpd_complete(struct device *dev)
 
 #define pm_genpd_prepare		NULL
 #define pm_genpd_suspend		NULL
+#define pm_genpd_suspend_late		NULL
 #define pm_genpd_suspend_noirq		NULL
+#define pm_genpd_resume_early		NULL
 #define pm_genpd_resume_noirq		NULL
 #define pm_genpd_resume			NULL
 #define pm_genpd_freeze			NULL
+#define pm_genpd_freeze_late		NULL
 #define pm_genpd_freeze_noirq		NULL
+#define pm_genpd_thaw_early		NULL
 #define pm_genpd_thaw_noirq		NULL
 #define pm_genpd_thaw			NULL
 #define pm_genpd_restore_noirq		NULL
@@ -1171,6 +1251,38 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
 }
 
 /**
+ * __pm_genpd_of_add_device - Add a device to an I/O PM domain.
+ * @genpd_node: Device tree node pointer representing a PM domain to which the
+ *   the device is added to.
+ * @dev: Device to be added.
+ * @td: Set of PM QoS timing parameters to attach to the device.
+ */
+int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
+			     struct gpd_timing_data *td)
+{
+	struct generic_pm_domain *genpd = NULL, *gpd;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	if (IS_ERR_OR_NULL(genpd_node) || IS_ERR_OR_NULL(dev))
+		return -EINVAL;
+
+	mutex_lock(&gpd_list_lock);
+	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
+		if (gpd->of_node == genpd_node) {
+			genpd = gpd;
+			break;
+		}
+	}
+	mutex_unlock(&gpd_list_lock);
+
+	if (!genpd)
+		return -EINVAL;
+
+	return __pm_genpd_add_device(genpd, dev, td);
+}
+
+/**
  * pm_genpd_remove_device - Remove a device from an I/O PM domain.
  * @genpd: PM domain to remove the device from.
  * @dev: Device to be removed.
@@ -1216,6 +1328,26 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 }
 
 /**
+ * pm_genpd_dev_always_on - Set/unset the "always on" flag for a given device.
+ * @dev: Device to set/unset the flag for.
+ * @val: The new value of the device's "always on" flag.
+ */
+void pm_genpd_dev_always_on(struct device *dev, bool val)
+{
+	struct pm_subsys_data *psd;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+
+	psd = dev_to_psd(dev);
+	if (psd && psd->domain_data)
+		to_gpd_data(psd->domain_data)->always_on = val;
+
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+}
+EXPORT_SYMBOL_GPL(pm_genpd_dev_always_on);
+
+/**
  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
  * @genpd: Master PM domain to add the subdomain to.
  * @subdomain: Subdomain to be added.
@@ -1450,7 +1582,7 @@ static int pm_genpd_default_suspend_late(struct device *dev)
 {
 	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;
 
-	return cb ? cb(dev) : pm_generic_suspend_noirq(dev);
+	return cb ? cb(dev) : pm_generic_suspend_late(dev);
 }
 
 /**
@@ -1461,7 +1593,7 @@ static int pm_genpd_default_resume_early(struct device *dev)
 {
 	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;
 
-	return cb ? cb(dev) : pm_generic_resume_noirq(dev);
+	return cb ? cb(dev) : pm_generic_resume_early(dev);
 }
 
 /**
@@ -1494,7 +1626,7 @@ static int pm_genpd_default_freeze_late(struct device *dev)
 {
 	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
 
-	return cb ? cb(dev) : pm_generic_freeze_noirq(dev);
+	return cb ? cb(dev) : pm_generic_freeze_late(dev);
 }
 
 /**
@@ -1505,7 +1637,7 @@ static int pm_genpd_default_thaw_early(struct device *dev)
 {
 	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
 
-	return cb ? cb(dev) : pm_generic_thaw_noirq(dev);
+	return cb ? cb(dev) : pm_generic_thaw_early(dev);
 }
 
 /**
@@ -1557,23 +1689,28 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
 	genpd->poweroff_task = NULL;
 	genpd->resume_count = 0;
 	genpd->device_count = 0;
-	genpd->suspended_count = 0;
 	genpd->max_off_time_ns = -1;
 	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
 	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
 	genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
 	genpd->domain.ops.prepare = pm_genpd_prepare;
 	genpd->domain.ops.suspend = pm_genpd_suspend;
+	genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
 	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
 	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
+	genpd->domain.ops.resume_early = pm_genpd_resume_early;
 	genpd->domain.ops.resume = pm_genpd_resume;
 	genpd->domain.ops.freeze = pm_genpd_freeze;
+	genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
 	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
 	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
+	genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
 	genpd->domain.ops.thaw = pm_genpd_thaw;
 	genpd->domain.ops.poweroff = pm_genpd_suspend;
+	genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
 	genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
 	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
+	genpd->domain.ops.restore_early = pm_genpd_resume_early;
 	genpd->domain.ops.restore = pm_genpd_resume;
 	genpd->domain.ops.complete = pm_genpd_complete;
 	genpd->dev_ops.save_state = pm_genpd_default_save_state;
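The pm_genpd_dev_always_on() helper added above is what the sh_cmt/sh_mtu2/sh_tmu commits in this pull call from their probe paths. A hedged sketch of such a caller (foo_probe is hypothetical, not from this merge):

#include <linux/platform_device.h>
#include <linux/pm_domain.h>

static int foo_probe(struct platform_device *pdev)
{
        /*
         * Keep this device's PM domain powered whenever the system is up;
         * note the flag only sticks if the device has already been added
         * to a generic PM domain (psd->domain_data must exist).
         */
        pm_genpd_dev_always_on(&pdev->dev, true);
        return 0;
}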
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 10bdd793f0bd..d03d290f31c2 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -92,59 +92,28 @@ int pm_generic_prepare(struct device *dev)
 }
 
 /**
- * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback.
- * @dev: Device to handle.
- * @event: PM transition of the system under way.
- * @bool: Whether or not this is the "noirq" stage.
- *
- * Execute the PM callback corresponding to @event provided by the driver of
- * @dev, if defined, and return its error code. Return 0 if the callback is
- * not present.
+ * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems.
+ * @dev: Device to suspend.
  */
-static int __pm_generic_call(struct device *dev, int event, bool noirq)
+int pm_generic_suspend_noirq(struct device *dev)
 {
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-	int (*callback)(struct device *);
-
-	if (!pm)
-		return 0;
-
-	switch (event) {
-	case PM_EVENT_SUSPEND:
-		callback = noirq ? pm->suspend_noirq : pm->suspend;
-		break;
-	case PM_EVENT_FREEZE:
-		callback = noirq ? pm->freeze_noirq : pm->freeze;
-		break;
-	case PM_EVENT_HIBERNATE:
-		callback = noirq ? pm->poweroff_noirq : pm->poweroff;
-		break;
-	case PM_EVENT_RESUME:
-		callback = noirq ? pm->resume_noirq : pm->resume;
-		break;
-	case PM_EVENT_THAW:
-		callback = noirq ? pm->thaw_noirq : pm->thaw;
-		break;
-	case PM_EVENT_RESTORE:
-		callback = noirq ? pm->restore_noirq : pm->restore;
-		break;
-	default:
-		callback = NULL;
-		break;
-	}
 
-	return callback ? callback(dev) : 0;
+	return pm && pm->suspend_noirq ? pm->suspend_noirq(dev) : 0;
 }
+EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
 
 /**
- * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems.
+ * pm_generic_suspend_late - Generic suspend_late callback for subsystems.
  * @dev: Device to suspend.
  */
-int pm_generic_suspend_noirq(struct device *dev)
+int pm_generic_suspend_late(struct device *dev)
 {
-	return __pm_generic_call(dev, PM_EVENT_SUSPEND, true);
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->suspend_late ? pm->suspend_late(dev) : 0;
 }
-EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
+EXPORT_SYMBOL_GPL(pm_generic_suspend_late);
 
 /**
  * pm_generic_suspend - Generic suspend callback for subsystems.
@@ -152,7 +121,9 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
  */
 int pm_generic_suspend(struct device *dev)
 {
-	return __pm_generic_call(dev, PM_EVENT_SUSPEND, false);
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->suspend ? pm->suspend(dev) : 0;
 }
 EXPORT_SYMBOL_GPL(pm_generic_suspend);
 
@@ -162,17 +133,33 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend);
  */
 int pm_generic_freeze_noirq(struct device *dev)
 {
-	return __pm_generic_call(dev, PM_EVENT_FREEZE, true);
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->freeze_noirq ? pm->freeze_noirq(dev) : 0;
 }
 EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
 
 /**
+ * pm_generic_freeze_late - Generic freeze_late callback for subsystems.
+ * @dev: Device to freeze.
+ */
+int pm_generic_freeze_late(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->freeze_late ? pm->freeze_late(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_freeze_late);
+
+/**
  * pm_generic_freeze - Generic freeze callback for subsystems.
  * @dev: Device to freeze.
  */
 int pm_generic_freeze(struct device *dev)
 {
-	return __pm_generic_call(dev, PM_EVENT_FREEZE, false);
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->freeze ? pm->freeze(dev) : 0;
 }
 EXPORT_SYMBOL_GPL(pm_generic_freeze);
 
@@ -182,17 +169,33 @@ EXPORT_SYMBOL_GPL(pm_generic_freeze);
  */
 int pm_generic_poweroff_noirq(struct device *dev)
 {
-	return __pm_generic_call(dev, PM_EVENT_HIBERNATE, true);
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->poweroff_noirq ? pm->poweroff_noirq(dev) : 0;
 }
 EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);
 
 /**
+ * pm_generic_poweroff_late - Generic poweroff_late callback for subsystems.
+ * @dev: Device to handle.
+ */
+int pm_generic_poweroff_late(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->poweroff_late ? pm->poweroff_late(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_poweroff_late);
+
+/**
  * pm_generic_poweroff - Generic poweroff callback for subsystems.
  * @dev: Device to handle.
  */
 int pm_generic_poweroff(struct device *dev)
 {
-	return __pm_generic_call(dev, PM_EVENT_HIBERNATE, false);
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->poweroff ? pm->poweroff(dev) : 0;
 }
 EXPORT_SYMBOL_GPL(pm_generic_poweroff);
 
@@ -202,17 +205,33 @@ EXPORT_SYMBOL_GPL(pm_generic_poweroff);
  */
 int pm_generic_thaw_noirq(struct device *dev)
 {
-	return __pm_generic_call(dev, PM_EVENT_THAW, true);
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->thaw_noirq ? pm->thaw_noirq(dev) : 0;
 }
 EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
 
 /**
+ * pm_generic_thaw_early - Generic thaw_early callback for subsystems.
+ * @dev: Device to thaw.
+ */
+int pm_generic_thaw_early(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->thaw_early ? pm->thaw_early(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_thaw_early);
+
+/**
  * pm_generic_thaw - Generic thaw callback for subsystems.
  * @dev: Device to thaw.
  */
 int pm_generic_thaw(struct device *dev)
 {
-	return __pm_generic_call(dev, PM_EVENT_THAW, false);
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->thaw ? pm->thaw(dev) : 0;
 }
 EXPORT_SYMBOL_GPL(pm_generic_thaw);
 
@@ -222,17 +241,33 @@ EXPORT_SYMBOL_GPL(pm_generic_thaw);
  */
 int pm_generic_resume_noirq(struct device *dev)
 {
-	return __pm_generic_call(dev, PM_EVENT_RESUME, true);
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->resume_noirq ? pm->resume_noirq(dev) : 0;
 }
 EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
 
 /**
+ * pm_generic_resume_early - Generic resume_early callback for subsystems.
+ * @dev: Device to resume.
+ */
+int pm_generic_resume_early(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->resume_early ? pm->resume_early(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_resume_early);
+
+/**
  * pm_generic_resume - Generic resume callback for subsystems.
  * @dev: Device to resume.
  */
 int pm_generic_resume(struct device *dev)
 {
-	return __pm_generic_call(dev, PM_EVENT_RESUME, false);
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->resume ? pm->resume(dev) : 0;
 }
 EXPORT_SYMBOL_GPL(pm_generic_resume);
 
@@ -242,17 +277,33 @@ EXPORT_SYMBOL_GPL(pm_generic_resume);
  */
 int pm_generic_restore_noirq(struct device *dev)
 {
-	return __pm_generic_call(dev, PM_EVENT_RESTORE, true);
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->restore_noirq ? pm->restore_noirq(dev) : 0;
 }
 EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
 
 /**
+ * pm_generic_restore_early - Generic restore_early callback for subsystems.
+ * @dev: Device to resume.
+ */
+int pm_generic_restore_early(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->restore_early ? pm->restore_early(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_restore_early);
+
+/**
  * pm_generic_restore - Generic restore callback for subsystems.
  * @dev: Device to restore.
  */
 int pm_generic_restore(struct device *dev)
 {
-	return __pm_generic_call(dev, PM_EVENT_RESTORE, false);
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->restore ? pm->restore(dev) : 0;
 }
 EXPORT_SYMBOL_GPL(pm_generic_restore);
 
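With __pm_generic_call() folded away, each helper above is now a direct wrapper around the matching driver callback. A subsystem wanting the defaults for the new phases could wire them up as below (foo_subsys_pm_ops is a hypothetical example, not part of this merge):

static const struct dev_pm_ops foo_subsys_pm_ops = {
        .suspend_late   = pm_generic_suspend_late,
        .resume_early   = pm_generic_resume_early,
        .freeze_late    = pm_generic_freeze_late,
        .thaw_early     = pm_generic_thaw_early,
        .poweroff_late  = pm_generic_poweroff_late,
        .restore_early  = pm_generic_restore_early,
};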
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index e2cc3d2e0ecc..b462c0e341cb 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -47,6 +47,7 @@ typedef int (*pm_callback_t)(struct device *);
 LIST_HEAD(dpm_list);
 LIST_HEAD(dpm_prepared_list);
 LIST_HEAD(dpm_suspended_list);
+LIST_HEAD(dpm_late_early_list);
 LIST_HEAD(dpm_noirq_list);
 
 struct suspend_stats suspend_stats;
@@ -246,6 +247,40 @@ static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
 }
 
 /**
+ * pm_late_early_op - Return the PM operation appropriate for given PM event.
+ * @ops: PM operations to choose from.
+ * @state: PM transition of the system being carried out.
+ *
+ * Runtime PM is disabled for @dev while this function is being executed.
+ */
+static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
+				      pm_message_t state)
+{
+	switch (state.event) {
+#ifdef CONFIG_SUSPEND
+	case PM_EVENT_SUSPEND:
+		return ops->suspend_late;
+	case PM_EVENT_RESUME:
+		return ops->resume_early;
+#endif /* CONFIG_SUSPEND */
+#ifdef CONFIG_HIBERNATE_CALLBACKS
+	case PM_EVENT_FREEZE:
+	case PM_EVENT_QUIESCE:
+		return ops->freeze_late;
+	case PM_EVENT_HIBERNATE:
+		return ops->poweroff_late;
+	case PM_EVENT_THAW:
+	case PM_EVENT_RECOVER:
+		return ops->thaw_early;
+	case PM_EVENT_RESTORE:
+		return ops->restore_early;
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
+	}
+
+	return NULL;
+}
+
+/**
  * pm_noirq_op - Return the PM operation appropriate for given PM event.
  * @ops: PM operations to choose from.
  * @state: PM transition of the system being carried out.
@@ -374,21 +409,21 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
 	TRACE_RESUME(0);
 
 	if (dev->pm_domain) {
-		info = "EARLY power domain ";
+		info = "noirq power domain ";
 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
 	} else if (dev->type && dev->type->pm) {
-		info = "EARLY type ";
+		info = "noirq type ";
 		callback = pm_noirq_op(dev->type->pm, state);
 	} else if (dev->class && dev->class->pm) {
-		info = "EARLY class ";
+		info = "noirq class ";
 		callback = pm_noirq_op(dev->class->pm, state);
 	} else if (dev->bus && dev->bus->pm) {
-		info = "EARLY bus ";
+		info = "noirq bus ";
 		callback = pm_noirq_op(dev->bus->pm, state);
 	}
 
 	if (!callback && dev->driver && dev->driver->pm) {
-		info = "EARLY driver ";
+		info = "noirq driver ";
 		callback = pm_noirq_op(dev->driver->pm, state);
 	}
 
@@ -399,13 +434,13 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
 }
 
 /**
- * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
+ * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
  * @state: PM transition of the system being carried out.
  *
- * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
+ * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
  * enable device drivers to receive interrupts.
  */
-void dpm_resume_noirq(pm_message_t state)
+static void dpm_resume_noirq(pm_message_t state)
 {
 	ktime_t starttime = ktime_get();
 
@@ -415,7 +450,7 @@ void dpm_resume_noirq(pm_message_t state)
 		int error;
 
 		get_device(dev);
-		list_move_tail(&dev->power.entry, &dpm_suspended_list);
+		list_move_tail(&dev->power.entry, &dpm_late_early_list);
 		mutex_unlock(&dpm_list_mtx);
 
 		error = device_resume_noirq(dev, state);
@@ -423,6 +458,80 @@ void dpm_resume_noirq(pm_message_t state)
 			suspend_stats.failed_resume_noirq++;
 			dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
 			dpm_save_failed_dev(dev_name(dev));
+			pm_dev_err(dev, state, " noirq", error);
+		}
+
+		mutex_lock(&dpm_list_mtx);
+		put_device(dev);
+	}
+	mutex_unlock(&dpm_list_mtx);
+	dpm_show_time(starttime, state, "noirq");
+	resume_device_irqs();
+}
+
+/**
+ * device_resume_early - Execute an "early resume" callback for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ *
+ * Runtime PM is disabled for @dev while this function is being executed.
+ */
+static int device_resume_early(struct device *dev, pm_message_t state)
+{
+	pm_callback_t callback = NULL;
+	char *info = NULL;
+	int error = 0;
+
+	TRACE_DEVICE(dev);
+	TRACE_RESUME(0);
+
+	if (dev->pm_domain) {
+		info = "early power domain ";
+		callback = pm_late_early_op(&dev->pm_domain->ops, state);
+	} else if (dev->type && dev->type->pm) {
+		info = "early type ";
+		callback = pm_late_early_op(dev->type->pm, state);
+	} else if (dev->class && dev->class->pm) {
+		info = "early class ";
+		callback = pm_late_early_op(dev->class->pm, state);
+	} else if (dev->bus && dev->bus->pm) {
+		info = "early bus ";
+		callback = pm_late_early_op(dev->bus->pm, state);
+	}
+
+	if (!callback && dev->driver && dev->driver->pm) {
+		info = "early driver ";
+		callback = pm_late_early_op(dev->driver->pm, state);
+	}
+
+	error = dpm_run_callback(callback, dev, state, info);
+
+	TRACE_RESUME(error);
+	return error;
+}
+
+/**
+ * dpm_resume_early - Execute "early resume" callbacks for all devices.
+ * @state: PM transition of the system being carried out.
+ */
+static void dpm_resume_early(pm_message_t state)
+{
+	ktime_t starttime = ktime_get();
+
+	mutex_lock(&dpm_list_mtx);
+	while (!list_empty(&dpm_late_early_list)) {
+		struct device *dev = to_device(dpm_late_early_list.next);
+		int error;
+
+		get_device(dev);
+		list_move_tail(&dev->power.entry, &dpm_suspended_list);
+		mutex_unlock(&dpm_list_mtx);
+
+		error = device_resume_early(dev, state);
+		if (error) {
+			suspend_stats.failed_resume_early++;
+			dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+			dpm_save_failed_dev(dev_name(dev));
 			pm_dev_err(dev, state, " early", error);
 		}
 
@@ -431,9 +540,18 @@ void dpm_resume_noirq(pm_message_t state)
 	}
 	mutex_unlock(&dpm_list_mtx);
 	dpm_show_time(starttime, state, "early");
-	resume_device_irqs();
 }
-EXPORT_SYMBOL_GPL(dpm_resume_noirq);
+
+/**
+ * dpm_resume_start - Execute "noirq" and "early" device callbacks.
+ * @state: PM transition of the system being carried out.
+ */
+void dpm_resume_start(pm_message_t state)
+{
+	dpm_resume_noirq(state);
+	dpm_resume_early(state);
+}
+EXPORT_SYMBOL_GPL(dpm_resume_start);
 
 /**
  * device_resume - Execute "resume" callbacks for given device.
@@ -716,21 +834,21 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
 	char *info = NULL;
 
 	if (dev->pm_domain) {
-		info = "LATE power domain ";
+		info = "noirq power domain ";
 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
 	} else if (dev->type && dev->type->pm) {
-		info = "LATE type ";
+		info = "noirq type ";
 		callback = pm_noirq_op(dev->type->pm, state);
 	} else if (dev->class && dev->class->pm) {
-		info = "LATE class ";
+		info = "noirq class ";
 		callback = pm_noirq_op(dev->class->pm, state);
 	} else if (dev->bus && dev->bus->pm) {
-		info = "LATE bus ";
+		info = "noirq bus ";
 		callback = pm_noirq_op(dev->bus->pm, state);
 	}
 
 	if (!callback && dev->driver && dev->driver->pm) {
-		info = "LATE driver ";
+		info = "noirq driver ";
 		callback = pm_noirq_op(dev->driver->pm, state);
 	}
 
@@ -738,21 +856,21 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
 }
 
 /**
- * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
+ * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
  * @state: PM transition of the system being carried out.
  *
  * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
-int dpm_suspend_noirq(pm_message_t state)
+static int dpm_suspend_noirq(pm_message_t state)
 {
 	ktime_t starttime = ktime_get();
 	int error = 0;
 
 	suspend_device_irqs();
 	mutex_lock(&dpm_list_mtx);
-	while (!list_empty(&dpm_suspended_list)) {
-		struct device *dev = to_device(dpm_suspended_list.prev);
+	while (!list_empty(&dpm_late_early_list)) {
+		struct device *dev = to_device(dpm_late_early_list.prev);
 
 		get_device(dev);
 		mutex_unlock(&dpm_list_mtx);
@@ -761,7 +879,7 @@ int dpm_suspend_noirq(pm_message_t state)
 
 		mutex_lock(&dpm_list_mtx);
 		if (error) {
-			pm_dev_err(dev, state, " late", error);
+			pm_dev_err(dev, state, " noirq", error);
 			suspend_stats.failed_suspend_noirq++;
 			dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
 			dpm_save_failed_dev(dev_name(dev));
@@ -776,10 +894,95 @@ int dpm_suspend_noirq(pm_message_t state)
 	if (error)
 		dpm_resume_noirq(resume_event(state));
 	else
+		dpm_show_time(starttime, state, "noirq");
+	return error;
+}
+
+/**
+ * device_suspend_late - Execute a "late suspend" callback for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ *
+ * Runtime PM is disabled for @dev while this function is being executed.
+ */
+static int device_suspend_late(struct device *dev, pm_message_t state)
+{
+	pm_callback_t callback = NULL;
+	char *info = NULL;
+
+	if (dev->pm_domain) {
+		info = "late power domain ";
+		callback = pm_late_early_op(&dev->pm_domain->ops, state);
+	} else if (dev->type && dev->type->pm) {
+		info = "late type ";
+		callback = pm_late_early_op(dev->type->pm, state);
+	} else if (dev->class && dev->class->pm) {
+		info = "late class ";
+		callback = pm_late_early_op(dev->class->pm, state);
+	} else if (dev->bus && dev->bus->pm) {
+		info = "late bus ";
+		callback = pm_late_early_op(dev->bus->pm, state);
+	}
+
+	if (!callback && dev->driver && dev->driver->pm) {
+		info = "late driver ";
+		callback = pm_late_early_op(dev->driver->pm, state);
+	}
+
+	return dpm_run_callback(callback, dev, state, info);
+}
+
+/**
+ * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
+ * @state: PM transition of the system being carried out.
+ */
+static int dpm_suspend_late(pm_message_t state)
+{
+	ktime_t starttime = ktime_get();
+	int error = 0;
+
+	mutex_lock(&dpm_list_mtx);
+	while (!list_empty(&dpm_suspended_list)) {
+		struct device *dev = to_device(dpm_suspended_list.prev);
+
+		get_device(dev);
+		mutex_unlock(&dpm_list_mtx);
+
+		error = device_suspend_late(dev, state);
+
+		mutex_lock(&dpm_list_mtx);
+		if (error) {
+			pm_dev_err(dev, state, " late", error);
+			suspend_stats.failed_suspend_late++;
+			dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
+			dpm_save_failed_dev(dev_name(dev));
+			put_device(dev);
+			break;
+		}
+		if (!list_empty(&dev->power.entry))
+			list_move(&dev->power.entry, &dpm_late_early_list);
+		put_device(dev);
+	}
+	mutex_unlock(&dpm_list_mtx);
+	if (error)
+		dpm_resume_early(resume_event(state));
+	else
 		dpm_show_time(starttime, state, "late");
+
 	return error;
 }
-EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
+
+/**
+ * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
+ * @state: PM transition of the system being carried out.
+ */
+int dpm_suspend_end(pm_message_t state)
+{
+	int error = dpm_suspend_late(state);
+
+	return error ? : dpm_suspend_noirq(state);
+}
+EXPORT_SYMBOL_GPL(dpm_suspend_end);
 
 /**
  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
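To summarize how the phases pair up after this change, a rough sketch of the order a suspend-resume cycle follows (error handling elided; whether a given platform calls these exact entry points is an assumption here, not shown in this diff):

        error = dpm_suspend_start(state);  /* "prepare" + "suspend" */
        error = dpm_suspend_end(state);    /* "suspend_late", then "suspend_noirq" */
        /* ... the system sleep state is entered here ... */
        dpm_resume_start(state);           /* "resume_noirq", then "resume_early" */
        dpm_resume_end(state);             /* "resume" + "complete" */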
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index 9bf62323aaf3..eeb4bff9505c 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -71,6 +71,8 @@ extern void dpm_sysfs_remove(struct device *dev);
 extern void rpm_sysfs_remove(struct device *dev);
 extern int wakeup_sysfs_add(struct device *dev);
 extern void wakeup_sysfs_remove(struct device *dev);
+extern int pm_qos_sysfs_add(struct device *dev);
+extern void pm_qos_sysfs_remove(struct device *dev);
 
 #else /* CONFIG_PM */
 
@@ -79,5 +81,7 @@ static inline void dpm_sysfs_remove(struct device *dev) {}
 static inline void rpm_sysfs_remove(struct device *dev) {}
 static inline int wakeup_sysfs_add(struct device *dev) { return 0; }
 static inline void wakeup_sysfs_remove(struct device *dev) {}
+static inline int pm_qos_sysfs_add(struct device *dev) { return 0; }
+static inline void pm_qos_sysfs_remove(struct device *dev) {}
 
 #endif
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index c5d358837461..71855570922d 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -41,6 +41,7 @@
 #include <linux/mutex.h>
 #include <linux/export.h>
 
+#include "power.h"
 
 static DEFINE_MUTEX(dev_pm_qos_mtx);
 
@@ -166,6 +167,12 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 	struct dev_pm_qos_request *req, *tmp;
 	struct pm_qos_constraints *c;
 
+	/*
+	 * If the device's PM QoS resume latency limit has been exposed to user
+	 * space, it has to be hidden at this point.
+	 */
+	dev_pm_qos_hide_latency_limit(dev);
+
 	mutex_lock(&dev_pm_qos_mtx);
 
 	dev->power.power_state = PMSG_INVALID;
@@ -445,3 +452,57 @@ int dev_pm_qos_add_ancestor_request(struct device *dev,
 	return error;
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
+
+#ifdef CONFIG_PM_RUNTIME
+static void __dev_pm_qos_drop_user_request(struct device *dev)
+{
+	dev_pm_qos_remove_request(dev->power.pq_req);
+	dev->power.pq_req = 0;
+}
+
+/**
+ * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
+ * @dev: Device whose PM QoS latency limit is to be exposed to user space.
+ * @value: Initial value of the latency limit.
+ */
+int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
+{
+	struct dev_pm_qos_request *req;
+	int ret;
+
+	if (!device_is_registered(dev) || value < 0)
+		return -EINVAL;
+
+	if (dev->power.pq_req)
+		return -EEXIST;
+
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	ret = dev_pm_qos_add_request(dev, req, value);
+	if (ret < 0)
+		return ret;
+
+	dev->power.pq_req = req;
+	ret = pm_qos_sysfs_add(dev);
+	if (ret)
+		__dev_pm_qos_drop_user_request(dev);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
+
+/**
+ * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
+ * @dev: Device whose PM QoS latency limit is to be hidden from user space.
+ */
+void dev_pm_qos_hide_latency_limit(struct device *dev)
+{
+	if (dev->power.pq_req) {
+		pm_qos_sysfs_remove(dev);
+		__dev_pm_qos_drop_user_request(dev);
+	}
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
+#endif /* CONFIG_PM_RUNTIME */
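A driver opts in by calling the new helper once the device is registered; the sh_mmcif and tmio_mmc commits in this pull do this from their probe paths. A hedged sketch (the 100 us value is illustrative):

        ret = dev_pm_qos_expose_latency_limit(&pdev->dev, 100);

The matching dev_pm_qos_hide_latency_limit() call is made automatically from dev_pm_qos_constraints_destroy() (see the hunk above), so a driver only needs it explicitly to withdraw the attribute early, e.g. on remove.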
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index adf41be0ea66..95c12f6cb5b9 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -5,6 +5,7 @@
 #include <linux/device.h>
 #include <linux/string.h>
 #include <linux/export.h>
+#include <linux/pm_qos.h>
 #include <linux/pm_runtime.h>
 #include <linux/atomic.h>
 #include <linux/jiffies.h>
@@ -217,6 +218,31 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev,
 static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show,
 		autosuspend_delay_ms_store);
 
+static ssize_t pm_qos_latency_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", dev->power.pq_req->node.prio);
+}
+
+static ssize_t pm_qos_latency_store(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t n)
+{
+	s32 value;
+	int ret;
+
+	if (kstrtos32(buf, 0, &value))
+		return -EINVAL;
+
+	if (value < 0)
+		return -EINVAL;
+
+	ret = dev_pm_qos_update_request(dev->power.pq_req, value);
+	return ret < 0 ? ret : n;
+}
+
+static DEVICE_ATTR(pm_qos_resume_latency_us, 0644,
+		   pm_qos_latency_show, pm_qos_latency_store);
 #endif /* CONFIG_PM_RUNTIME */
 
 #ifdef CONFIG_PM_SLEEP
@@ -490,6 +516,17 @@ static struct attribute_group pm_runtime_attr_group = {
 	.attrs	= runtime_attrs,
 };
 
+static struct attribute *pm_qos_attrs[] = {
+#ifdef CONFIG_PM_RUNTIME
+	&dev_attr_pm_qos_resume_latency_us.attr,
+#endif /* CONFIG_PM_RUNTIME */
+	NULL,
+};
+static struct attribute_group pm_qos_attr_group = {
+	.name	= power_group_name,
+	.attrs	= pm_qos_attrs,
+};
+
 int dpm_sysfs_add(struct device *dev)
 {
 	int rc;
@@ -530,6 +567,16 @@ void wakeup_sysfs_remove(struct device *dev)
 	sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
 }
 
+int pm_qos_sysfs_add(struct device *dev)
+{
+	return sysfs_merge_group(&dev->kobj, &pm_qos_attr_group);
+}
+
+void pm_qos_sysfs_remove(struct device *dev)
+{
+	sysfs_unmerge_group(&dev->kobj, &pm_qos_attr_group);
+}
+
 void rpm_sysfs_remove(struct device *dev)
 {
 	sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index caf995fb774b..2a3e581b8dcd 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -53,6 +53,23 @@ static void pm_wakeup_timer_fn(unsigned long data);
53static LIST_HEAD(wakeup_sources); 53static LIST_HEAD(wakeup_sources);
54 54
55/** 55/**
56 * wakeup_source_prepare - Prepare a new wakeup source for initialization.
57 * @ws: Wakeup source to prepare.
58 * @name: Pointer to the name of the new wakeup source.
59 *
 60 * Callers must ensure that the @name string won't be freed while @ws is still in
61 * use.
62 */
63void wakeup_source_prepare(struct wakeup_source *ws, const char *name)
64{
65 if (ws) {
66 memset(ws, 0, sizeof(*ws));
67 ws->name = name;
68 }
69}
70EXPORT_SYMBOL_GPL(wakeup_source_prepare);
71
72/**
56 * wakeup_source_create - Create a struct wakeup_source object. 73 * wakeup_source_create - Create a struct wakeup_source object.
57 * @name: Name of the new wakeup source. 74 * @name: Name of the new wakeup source.
58 */ 75 */
@@ -60,37 +77,44 @@ struct wakeup_source *wakeup_source_create(const char *name)
60{ 77{
61 struct wakeup_source *ws; 78 struct wakeup_source *ws;
62 79
63 ws = kzalloc(sizeof(*ws), GFP_KERNEL); 80 ws = kmalloc(sizeof(*ws), GFP_KERNEL);
64 if (!ws) 81 if (!ws)
65 return NULL; 82 return NULL;
66 83
67 spin_lock_init(&ws->lock); 84 wakeup_source_prepare(ws, name ? kstrdup(name, GFP_KERNEL) : NULL);
68 if (name)
69 ws->name = kstrdup(name, GFP_KERNEL);
70
71 return ws; 85 return ws;
72} 86}
73EXPORT_SYMBOL_GPL(wakeup_source_create); 87EXPORT_SYMBOL_GPL(wakeup_source_create);
74 88
75/** 89/**
90 * wakeup_source_drop - Prepare a struct wakeup_source object for destruction.
91 * @ws: Wakeup source to prepare for destruction.
92 *
93 * Callers must ensure that __pm_stay_awake() or __pm_wakeup_event() will never
94 * be run in parallel with this function for the same wakeup source object.
95 */
96void wakeup_source_drop(struct wakeup_source *ws)
97{
98 if (!ws)
99 return;
100
101 del_timer_sync(&ws->timer);
102 __pm_relax(ws);
103}
104EXPORT_SYMBOL_GPL(wakeup_source_drop);
105
106/**
76 * wakeup_source_destroy - Destroy a struct wakeup_source object. 107 * wakeup_source_destroy - Destroy a struct wakeup_source object.
77 * @ws: Wakeup source to destroy. 108 * @ws: Wakeup source to destroy.
109 *
110 * Use only for wakeup source objects created with wakeup_source_create().
78 */ 111 */
79void wakeup_source_destroy(struct wakeup_source *ws) 112void wakeup_source_destroy(struct wakeup_source *ws)
80{ 113{
81 if (!ws) 114 if (!ws)
82 return; 115 return;
83 116
84 spin_lock_irq(&ws->lock); 117 wakeup_source_drop(ws);
85 while (ws->active) {
86 spin_unlock_irq(&ws->lock);
87
88 schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT));
89
90 spin_lock_irq(&ws->lock);
91 }
92 spin_unlock_irq(&ws->lock);
93
94 kfree(ws->name); 118 kfree(ws->name);
95 kfree(ws); 119 kfree(ws);
96} 120}
@@ -105,6 +129,7 @@ void wakeup_source_add(struct wakeup_source *ws)
105 if (WARN_ON(!ws)) 129 if (WARN_ON(!ws))
106 return; 130 return;
107 131
132 spin_lock_init(&ws->lock);
108 setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws); 133 setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws);
109 ws->active = false; 134 ws->active = false;
110 135
@@ -152,8 +177,10 @@ EXPORT_SYMBOL_GPL(wakeup_source_register);
152 */ 177 */
153void wakeup_source_unregister(struct wakeup_source *ws) 178void wakeup_source_unregister(struct wakeup_source *ws)
154{ 179{
155 wakeup_source_remove(ws); 180 if (ws) {
156 wakeup_source_destroy(ws); 181 wakeup_source_remove(ws);
182 wakeup_source_destroy(ws);
183 }
157} 184}
158EXPORT_SYMBOL_GPL(wakeup_source_unregister); 185EXPORT_SYMBOL_GPL(wakeup_source_unregister);
159 186
@@ -349,7 +376,6 @@ static void wakeup_source_activate(struct wakeup_source *ws)
349{ 376{
350 ws->active = true; 377 ws->active = true;
351 ws->active_count++; 378 ws->active_count++;
352 ws->timer_expires = jiffies;
353 ws->last_time = ktime_get(); 379 ws->last_time = ktime_get();
354 380
355 /* Increment the counter of events in progress. */ 381 /* Increment the counter of events in progress. */
@@ -370,9 +396,14 @@ void __pm_stay_awake(struct wakeup_source *ws)
370 return; 396 return;
371 397
372 spin_lock_irqsave(&ws->lock, flags); 398 spin_lock_irqsave(&ws->lock, flags);
399
373 ws->event_count++; 400 ws->event_count++;
374 if (!ws->active) 401 if (!ws->active)
375 wakeup_source_activate(ws); 402 wakeup_source_activate(ws);
403
404 del_timer(&ws->timer);
405 ws->timer_expires = 0;
406
376 spin_unlock_irqrestore(&ws->lock, flags); 407 spin_unlock_irqrestore(&ws->lock, flags);
377} 408}
378EXPORT_SYMBOL_GPL(__pm_stay_awake); 409EXPORT_SYMBOL_GPL(__pm_stay_awake);
@@ -438,6 +469,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
438 ws->max_time = duration; 469 ws->max_time = duration;
439 470
440 del_timer(&ws->timer); 471 del_timer(&ws->timer);
472 ws->timer_expires = 0;
441 473
442 /* 474 /*
443 * Increment the counter of registered wakeup events and decrement the 475 * Increment the counter of registered wakeup events and decrement the
@@ -492,11 +524,22 @@ EXPORT_SYMBOL_GPL(pm_relax);
492 * pm_wakeup_timer_fn - Delayed finalization of a wakeup event. 524 * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
493 * @data: Address of the wakeup source object associated with the event source. 525 * @data: Address of the wakeup source object associated with the event source.
494 * 526 *
495 * Call __pm_relax() for the wakeup source whose address is stored in @data. 527 * Call wakeup_source_deactivate() for the wakeup source whose address is stored
 528 * in @data if it is currently active, its timer has not been canceled, and
 529 * the timer's expiration time is not in the future.
496 */ 530 */
497static void pm_wakeup_timer_fn(unsigned long data) 531static void pm_wakeup_timer_fn(unsigned long data)
498{ 532{
499 __pm_relax((struct wakeup_source *)data); 533 struct wakeup_source *ws = (struct wakeup_source *)data;
534 unsigned long flags;
535
536 spin_lock_irqsave(&ws->lock, flags);
537
538 if (ws->active && ws->timer_expires
539 && time_after_eq(jiffies, ws->timer_expires))
540 wakeup_source_deactivate(ws);
541
542 spin_unlock_irqrestore(&ws->lock, flags);
500} 543}
501 544
502/** 545/**
@@ -534,7 +577,7 @@ void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
534 if (!expires) 577 if (!expires)
535 expires = 1; 578 expires = 1;
536 579
537 if (time_after(expires, ws->timer_expires)) { 580 if (!ws->timer_expires || time_after(expires, ws->timer_expires)) {
538 mod_timer(&ws->timer, expires); 581 mod_timer(&ws->timer, expires);
539 ws->timer_expires = expires; 582 ws->timer_expires = expires;
540 } 583 }
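
For reference, a hedged sketch of the life cycle these fixes harden, with "example" names as placeholders; wakeup_source_register() combines create and add, and the unregister path is now NULL-safe as shown above:

#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

static struct wakeup_source *ws;

static int example_init(void)
{
	ws = wakeup_source_register("example");
	return ws ? 0 : -ENOMEM;
}

static irqreturn_t example_irq(int irq, void *dev_id)
{
	/* Hold a wakeup for at most 500 ms while the event is handled;
	 * the timer fixes above make this safe to mix with
	 * __pm_stay_awake()/__pm_relax() on the same source. */
	__pm_wakeup_event(ws, 500);
	return IRQ_HANDLED;
}

static void example_exit(void)
{
	wakeup_source_unregister(ws);
}
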
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index ca09bc421ddb..32fe9ef5cc5c 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -32,6 +32,7 @@
32#include <linux/sh_timer.h> 32#include <linux/sh_timer.h>
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <linux/module.h> 34#include <linux/module.h>
35#include <linux/pm_domain.h>
35 36
36struct sh_cmt_priv { 37struct sh_cmt_priv {
37 void __iomem *mapbase; 38 void __iomem *mapbase;
@@ -689,6 +690,9 @@ static int __devinit sh_cmt_probe(struct platform_device *pdev)
689 struct sh_cmt_priv *p = platform_get_drvdata(pdev); 690 struct sh_cmt_priv *p = platform_get_drvdata(pdev);
690 int ret; 691 int ret;
691 692
693 if (!is_early_platform_device(pdev))
694 pm_genpd_dev_always_on(&pdev->dev, true);
695
692 if (p) { 696 if (p) {
693 dev_info(&pdev->dev, "kept as earlytimer\n"); 697 dev_info(&pdev->dev, "kept as earlytimer\n");
694 return 0; 698 return 0;
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index db8d5955bad4..a2172f690418 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -31,6 +31,7 @@
31#include <linux/sh_timer.h> 31#include <linux/sh_timer.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/module.h> 33#include <linux/module.h>
34#include <linux/pm_domain.h>
34 35
35struct sh_mtu2_priv { 36struct sh_mtu2_priv {
36 void __iomem *mapbase; 37 void __iomem *mapbase;
@@ -306,6 +307,9 @@ static int __devinit sh_mtu2_probe(struct platform_device *pdev)
306 struct sh_mtu2_priv *p = platform_get_drvdata(pdev); 307 struct sh_mtu2_priv *p = platform_get_drvdata(pdev);
307 int ret; 308 int ret;
308 309
310 if (!is_early_platform_device(pdev))
311 pm_genpd_dev_always_on(&pdev->dev, true);
312
309 if (p) { 313 if (p) {
310 dev_info(&pdev->dev, "kept as earlytimer\n"); 314 dev_info(&pdev->dev, "kept as earlytimer\n");
311 return 0; 315 return 0;
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 079e96ad44e8..97f54b634be4 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -32,6 +32,7 @@
32#include <linux/sh_timer.h> 32#include <linux/sh_timer.h>
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <linux/module.h> 34#include <linux/module.h>
35#include <linux/pm_domain.h>
35 36
36struct sh_tmu_priv { 37struct sh_tmu_priv {
37 void __iomem *mapbase; 38 void __iomem *mapbase;
@@ -410,6 +411,9 @@ static int __devinit sh_tmu_probe(struct platform_device *pdev)
410 struct sh_tmu_priv *p = platform_get_drvdata(pdev); 411 struct sh_tmu_priv *p = platform_get_drvdata(pdev);
411 int ret; 412 int ret;
412 413
414 if (!is_early_platform_device(pdev))
415 pm_genpd_dev_always_on(&pdev->dev, true);
416
413 if (p) { 417 if (p) {
414 dev_info(&pdev->dev, "kept as earlytimer\n"); 418 dev_info(&pdev->dev, "kept as earlytimer\n");
415 return 0; 419 return 0;
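
The same two-line guard appears in all three sh timer drivers, presumably because these timers must keep ticking for timekeeping even when their power domain would otherwise be switched off, while early platform devices probe before the genpd machinery is available. Schematically (a sketch, not any one driver verbatim):

static int sh_timer_probe(struct platform_device *pdev)
{
	/* Regular probe: pin the device's PM domain on. */
	if (!is_early_platform_device(pdev))
		pm_genpd_dev_always_on(&pdev->dev, true);

	/* ... the rest of the probe is unchanged ... */
	return 0;
}
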
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index c189b82f5ece..70c31d43fff3 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -83,6 +83,7 @@ int update_devfreq(struct devfreq *devfreq)
83{ 83{
84 unsigned long freq; 84 unsigned long freq;
85 int err = 0; 85 int err = 0;
86 u32 flags = 0;
86 87
87 if (!mutex_is_locked(&devfreq->lock)) { 88 if (!mutex_is_locked(&devfreq->lock)) {
88 WARN(true, "devfreq->lock must be locked by the caller.\n"); 89 WARN(true, "devfreq->lock must be locked by the caller.\n");
@@ -94,7 +95,24 @@ int update_devfreq(struct devfreq *devfreq)
94 if (err) 95 if (err)
95 return err; 96 return err;
96 97
97 err = devfreq->profile->target(devfreq->dev.parent, &freq); 98 /*
 99 * Adjust the frequency with user freq and QoS.
 100 *
 101 * List from the highest priority:
102 * max_freq (probably called by thermal when it's too hot)
103 * min_freq
104 */
105
106 if (devfreq->min_freq && freq < devfreq->min_freq) {
107 freq = devfreq->min_freq;
108 flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
109 }
110 if (devfreq->max_freq && freq > devfreq->max_freq) {
111 freq = devfreq->max_freq;
112 flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
113 }
114
115 err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
98 if (err) 116 if (err)
99 return err; 117 return err;
100 118
@@ -501,12 +519,82 @@ static ssize_t show_central_polling(struct device *dev,
501 !to_devfreq(dev)->governor->no_central_polling); 519 !to_devfreq(dev)->governor->no_central_polling);
502} 520}
503 521
522static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr,
523 const char *buf, size_t count)
524{
525 struct devfreq *df = to_devfreq(dev);
526 unsigned long value;
527 int ret;
528 unsigned long max;
529
530 ret = sscanf(buf, "%lu", &value);
531 if (ret != 1)
532 goto out;
533
534 mutex_lock(&df->lock);
535 max = df->max_freq;
536 if (value && max && value > max) {
537 ret = -EINVAL;
538 goto unlock;
539 }
540
541 df->min_freq = value;
542 update_devfreq(df);
543 ret = count;
544unlock:
545 mutex_unlock(&df->lock);
546out:
547 return ret;
548}
549
550static ssize_t show_min_freq(struct device *dev, struct device_attribute *attr,
551 char *buf)
552{
553 return sprintf(buf, "%lu\n", to_devfreq(dev)->min_freq);
554}
555
556static ssize_t store_max_freq(struct device *dev, struct device_attribute *attr,
557 const char *buf, size_t count)
558{
559 struct devfreq *df = to_devfreq(dev);
560 unsigned long value;
561 int ret;
562 unsigned long min;
563
564 ret = sscanf(buf, "%lu", &value);
565 if (ret != 1)
566 goto out;
567
568 mutex_lock(&df->lock);
569 min = df->min_freq;
570 if (value && min && value < min) {
571 ret = -EINVAL;
572 goto unlock;
573 }
574
575 df->max_freq = value;
576 update_devfreq(df);
577 ret = count;
578unlock:
579 mutex_unlock(&df->lock);
580out:
581 return ret;
582}
583
584static ssize_t show_max_freq(struct device *dev, struct device_attribute *attr,
585 char *buf)
586{
587 return sprintf(buf, "%lu\n", to_devfreq(dev)->max_freq);
588}
589
504static struct device_attribute devfreq_attrs[] = { 590static struct device_attribute devfreq_attrs[] = {
505 __ATTR(governor, S_IRUGO, show_governor, NULL), 591 __ATTR(governor, S_IRUGO, show_governor, NULL),
506 __ATTR(cur_freq, S_IRUGO, show_freq, NULL), 592 __ATTR(cur_freq, S_IRUGO, show_freq, NULL),
507 __ATTR(central_polling, S_IRUGO, show_central_polling, NULL), 593 __ATTR(central_polling, S_IRUGO, show_central_polling, NULL),
508 __ATTR(polling_interval, S_IRUGO | S_IWUSR, show_polling_interval, 594 __ATTR(polling_interval, S_IRUGO | S_IWUSR, show_polling_interval,
509 store_polling_interval), 595 store_polling_interval),
596 __ATTR(min_freq, S_IRUGO | S_IWUSR, show_min_freq, store_min_freq),
597 __ATTR(max_freq, S_IRUGO | S_IWUSR, show_max_freq, store_max_freq),
510 { }, 598 { },
511}; 599};
512 600
@@ -555,14 +643,30 @@ module_exit(devfreq_exit);
555 * freq value given to target callback. 643 * freq value given to target callback.
556 * @dev The devfreq user device. (parent of devfreq) 644 * @dev The devfreq user device. (parent of devfreq)
557 * @freq The frequency given to target function 645 * @freq The frequency given to target function
646 * @flags Flags handed from devfreq framework.
558 * 647 *
559 */ 648 */
560struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq) 649struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq,
650 u32 flags)
561{ 651{
562 struct opp *opp = opp_find_freq_ceil(dev, freq); 652 struct opp *opp;
563 653
564 if (opp == ERR_PTR(-ENODEV)) 654 if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
655 /* The freq is an upper bound. opp should be lower */
565 opp = opp_find_freq_floor(dev, freq); 656 opp = opp_find_freq_floor(dev, freq);
657
658 /* If not available, use the closest opp */
659 if (opp == ERR_PTR(-ENODEV))
660 opp = opp_find_freq_ceil(dev, freq);
661 } else {
 662 /* The freq is a lower bound. opp should be higher */
663 opp = opp_find_freq_ceil(dev, freq);
664
665 /* If not available, use the closest opp */
666 if (opp == ERR_PTR(-ENODEV))
667 opp = opp_find_freq_floor(dev, freq);
668 }
669
566 return opp; 670 return opp;
567} 671}
568 672
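
With the flag plumbed through, a driver's target() callback can delegate OPP selection entirely. A hedged sketch under the new three-argument signature ("foo" is a placeholder; exynos4_bus_target() below is the in-tree instance):

#include <linux/devfreq.h>
#include <linux/err.h>
#include <linux/opp.h>

static int foo_target(struct device *dev, unsigned long *freq, u32 flags)
{
	/* Pick an OPP, honouring the GLB/LUB flag set in update_devfreq(). */
	struct opp *opp = devfreq_recommended_opp(dev, freq, flags);

	if (IS_ERR(opp))
		return PTR_ERR(opp);

	/* Program clocks and voltage for opp_get_freq(opp) here. */
	return 0;
}
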
diff --git a/drivers/devfreq/exynos4_bus.c b/drivers/devfreq/exynos4_bus.c
index 6460577d6701..1a361e99965a 100644
--- a/drivers/devfreq/exynos4_bus.c
+++ b/drivers/devfreq/exynos4_bus.c
@@ -619,15 +619,19 @@ static int exynos4_bus_setvolt(struct busfreq_data *data, struct opp *opp,
619 return err; 619 return err;
620} 620}
621 621
622static int exynos4_bus_target(struct device *dev, unsigned long *_freq) 622static int exynos4_bus_target(struct device *dev, unsigned long *_freq,
623 u32 flags)
623{ 624{
624 int err = 0; 625 int err = 0;
625 struct platform_device *pdev = container_of(dev, struct platform_device, 626 struct platform_device *pdev = container_of(dev, struct platform_device,
626 dev); 627 dev);
627 struct busfreq_data *data = platform_get_drvdata(pdev); 628 struct busfreq_data *data = platform_get_drvdata(pdev);
628 struct opp *opp = devfreq_recommended_opp(dev, _freq); 629 struct opp *opp = devfreq_recommended_opp(dev, _freq, flags);
629 unsigned long old_freq = opp_get_freq(data->curr_opp);
630 unsigned long freq = opp_get_freq(opp); 630 unsigned long freq = opp_get_freq(opp);
631 unsigned long old_freq = opp_get_freq(data->curr_opp);
632
633 if (IS_ERR(opp))
634 return PTR_ERR(opp);
631 635
632 if (old_freq == freq) 636 if (old_freq == freq)
633 return 0; 637 return 0;
@@ -689,9 +693,7 @@ static int exynos4_get_busier_dmc(struct busfreq_data *data)
689static int exynos4_bus_get_dev_status(struct device *dev, 693static int exynos4_bus_get_dev_status(struct device *dev,
690 struct devfreq_dev_status *stat) 694 struct devfreq_dev_status *stat)
691{ 695{
692 struct platform_device *pdev = container_of(dev, struct platform_device, 696 struct busfreq_data *data = dev_get_drvdata(dev);
693 dev);
694 struct busfreq_data *data = platform_get_drvdata(pdev);
695 int busier_dmc; 697 int busier_dmc;
696 int cycles_x2 = 2; /* 2 x cycles */ 698 int cycles_x2 = 2; /* 2 x cycles */
697 void __iomem *addr; 699 void __iomem *addr;
@@ -739,9 +741,7 @@ static int exynos4_bus_get_dev_status(struct device *dev,
739 741
740static void exynos4_bus_exit(struct device *dev) 742static void exynos4_bus_exit(struct device *dev)
741{ 743{
742 struct platform_device *pdev = container_of(dev, struct platform_device, 744 struct busfreq_data *data = dev_get_drvdata(dev);
743 dev);
744 struct busfreq_data *data = platform_get_drvdata(pdev);
745 745
746 devfreq_unregister_opp_notifier(dev, data->devfreq); 746 devfreq_unregister_opp_notifier(dev, data->devfreq);
747} 747}
@@ -1087,9 +1087,7 @@ static __devexit int exynos4_busfreq_remove(struct platform_device *pdev)
1087 1087
1088static int exynos4_busfreq_resume(struct device *dev) 1088static int exynos4_busfreq_resume(struct device *dev)
1089{ 1089{
1090 struct platform_device *pdev = container_of(dev, struct platform_device, 1090 struct busfreq_data *data = dev_get_drvdata(dev);
1091 dev);
1092 struct busfreq_data *data = platform_get_drvdata(pdev);
1093 1091
1094 busfreq_mon_reset(data); 1092 busfreq_mon_reset(data);
1095 return 0; 1093 return 0;
@@ -1132,4 +1130,3 @@ module_exit(exynos4_busfreq_exit);
1132MODULE_LICENSE("GPL"); 1130MODULE_LICENSE("GPL");
1133MODULE_DESCRIPTION("EXYNOS4 busfreq driver with devfreq framework"); 1131MODULE_DESCRIPTION("EXYNOS4 busfreq driver with devfreq framework");
1134MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>"); 1132MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
1135MODULE_ALIAS("exynos4-busfreq");
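
The three dev_get_drvdata() hunks above all rest on the same identity: platform_set_drvdata() stores into the embedded struct device, so for any platform device dev_get_drvdata(dev) and platform_get_drvdata(to_platform_device(dev)) return the same pointer. A one-line distillation (sketch only):

static struct busfreq_data *busfreq_data_of(struct device *dev)
{
	/* Equivalent to the container_of() + platform_get_drvdata() detour. */
	return dev_get_drvdata(dev);
}
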
diff --git a/drivers/devfreq/governor_performance.c b/drivers/devfreq/governor_performance.c
index c0596b291761..574a06b1b1de 100644
--- a/drivers/devfreq/governor_performance.c
+++ b/drivers/devfreq/governor_performance.c
@@ -18,7 +18,10 @@ static int devfreq_performance_func(struct devfreq *df,
18 * target callback should be able to get floor value as 18 * target callback should be able to get floor value as
19 * said in devfreq.h 19 * said in devfreq.h
20 */ 20 */
21 *freq = UINT_MAX; 21 if (!df->max_freq)
22 *freq = UINT_MAX;
23 else
24 *freq = df->max_freq;
22 return 0; 25 return 0;
23} 26}
24 27
diff --git a/drivers/devfreq/governor_powersave.c b/drivers/devfreq/governor_powersave.c
index 2483a85a266f..d742d4a82d6a 100644
--- a/drivers/devfreq/governor_powersave.c
+++ b/drivers/devfreq/governor_powersave.c
@@ -18,7 +18,7 @@ static int devfreq_powersave_func(struct devfreq *df,
18 * target callback should be able to get ceiling value as 18 * target callback should be able to get ceiling value as
19 * said in devfreq.h 19 * said in devfreq.h
20 */ 20 */
21 *freq = 0; 21 *freq = df->min_freq;
22 return 0; 22 return 0;
23} 23}
24 24
diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c
index efad8dcf9028..a2e3eae79011 100644
--- a/drivers/devfreq/governor_simpleondemand.c
+++ b/drivers/devfreq/governor_simpleondemand.c
@@ -25,6 +25,7 @@ static int devfreq_simple_ondemand_func(struct devfreq *df,
25 unsigned int dfso_upthreshold = DFSO_UPTHRESHOLD; 25 unsigned int dfso_upthreshold = DFSO_UPTHRESHOLD;
26 unsigned int dfso_downdifferential = DFSO_DOWNDIFFERENCTIAL; 26 unsigned int dfso_downdifferential = DFSO_DOWNDIFFERENCTIAL;
27 struct devfreq_simple_ondemand_data *data = df->data; 27 struct devfreq_simple_ondemand_data *data = df->data;
28 unsigned long max = (df->max_freq) ? df->max_freq : UINT_MAX;
28 29
29 if (err) 30 if (err)
30 return err; 31 return err;
@@ -41,7 +42,7 @@ static int devfreq_simple_ondemand_func(struct devfreq *df,
41 42
42 /* Assume MAX if it is going to be divided by zero */ 43 /* Assume MAX if it is going to be divided by zero */
43 if (stat.total_time == 0) { 44 if (stat.total_time == 0) {
44 *freq = UINT_MAX; 45 *freq = max;
45 return 0; 46 return 0;
46 } 47 }
47 48
@@ -54,13 +55,13 @@ static int devfreq_simple_ondemand_func(struct devfreq *df,
54 /* Set MAX if it's busy enough */ 55 /* Set MAX if it's busy enough */
55 if (stat.busy_time * 100 > 56 if (stat.busy_time * 100 >
56 stat.total_time * dfso_upthreshold) { 57 stat.total_time * dfso_upthreshold) {
57 *freq = UINT_MAX; 58 *freq = max;
58 return 0; 59 return 0;
59 } 60 }
60 61
61 /* Set MAX if we do not know the initial frequency */ 62 /* Set MAX if we do not know the initial frequency */
62 if (stat.current_frequency == 0) { 63 if (stat.current_frequency == 0) {
63 *freq = UINT_MAX; 64 *freq = max;
64 return 0; 65 return 0;
65 } 66 }
66 67
@@ -79,6 +80,11 @@ static int devfreq_simple_ondemand_func(struct devfreq *df,
79 b = div_u64(b, (dfso_upthreshold - dfso_downdifferential / 2)); 80 b = div_u64(b, (dfso_upthreshold - dfso_downdifferential / 2));
80 *freq = (unsigned long) b; 81 *freq = (unsigned long) b;
81 82
83 if (df->min_freq && *freq < df->min_freq)
84 *freq = df->min_freq;
85 if (df->max_freq && *freq > df->max_freq)
86 *freq = df->max_freq;
87
82 return 0; 88 return 0;
83} 89}
84 90
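
To make the scaling step above concrete, a worked example with invented numbers (busy 60 of 100 time units at 200 MHz, upthreshold 90, downdifferential 5), written as ordinary user-space C for clarity:

#include <stdio.h>

int main(void)
{
	unsigned long long busy = 60, total = 100, cur = 200000000ULL;
	unsigned int up = 90, down = 5;
	unsigned long long b = busy * cur;

	b /= total;		/* 120 MHz of frequency actually used */
	b *= 100;
	b /= up - down / 2;	/* divide by 88: aim for ~88% load */
	printf("target: %llu Hz\n", b);	/* ~136.4 MHz */
	return 0;
}

If min_freq/max_freq are set, the result is then clamped exactly as the last hunk shows.
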
diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c
index 4f8b563da782..0681246fc89d 100644
--- a/drivers/devfreq/governor_userspace.c
+++ b/drivers/devfreq/governor_userspace.c
@@ -25,10 +25,19 @@ static int devfreq_userspace_func(struct devfreq *df, unsigned long *freq)
25{ 25{
26 struct userspace_data *data = df->data; 26 struct userspace_data *data = df->data;
27 27
28 if (!data->valid) 28 if (data->valid) {
29 unsigned long adjusted_freq = data->user_frequency;
30
31 if (df->max_freq && adjusted_freq > df->max_freq)
32 adjusted_freq = df->max_freq;
33
34 if (df->min_freq && adjusted_freq < df->min_freq)
35 adjusted_freq = df->min_freq;
36
37 *freq = adjusted_freq;
38 } else {
29 *freq = df->previous_freq; /* No user freq specified yet */ 39 *freq = df->previous_freq; /* No user freq specified yet */
30 else 40 }
31 *freq = data->user_frequency;
32 return 0; 41 return 0;
33} 42}
34 43
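
One subtlety in the rewritten branch above: max_freq is applied before min_freq, so if user space ever managed to set min above max, min would win. A hedged distillation of the clamp (the store_min_freq()/store_max_freq() handlers earlier in this diff try to rule the conflict out):

static unsigned long userspace_clamp(unsigned long freq,
				     unsigned long min, unsigned long max)
{
	if (max && freq > max)
		freq = max;
	if (min && freq < min)	/* applied last, so min wins a conflict */
		freq = min;
	return freq;
}
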
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 352d4797865b..75a485448796 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -56,6 +56,7 @@
56#include <linux/mmc/sh_mmcif.h> 56#include <linux/mmc/sh_mmcif.h>
57#include <linux/pagemap.h> 57#include <linux/pagemap.h>
58#include <linux/platform_device.h> 58#include <linux/platform_device.h>
59#include <linux/pm_qos.h>
59#include <linux/pm_runtime.h> 60#include <linux/pm_runtime.h>
60#include <linux/spinlock.h> 61#include <linux/spinlock.h>
61#include <linux/module.h> 62#include <linux/module.h>
@@ -1346,6 +1347,8 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
1346 if (ret < 0) 1347 if (ret < 0)
1347 goto clean_up5; 1348 goto clean_up5;
1348 1349
1350 dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
1351
1349 dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION); 1352 dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION);
1350 dev_dbg(&pdev->dev, "chip ver H'%04x\n", 1353 dev_dbg(&pdev->dev, "chip ver H'%04x\n",
1351 sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff); 1354 sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff);
@@ -1376,6 +1379,8 @@ static int __devexit sh_mmcif_remove(struct platform_device *pdev)
1376 host->dying = true; 1379 host->dying = true;
1377 pm_runtime_get_sync(&pdev->dev); 1380 pm_runtime_get_sync(&pdev->dev);
1378 1381
1382 dev_pm_qos_hide_latency_limit(&pdev->dev);
1383
1379 mmc_remove_host(host->mmc); 1384 mmc_remove_host(host->mmc);
1380 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); 1385 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
1381 1386
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index 5f9ad74fbf80..e21988901c36 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -39,6 +39,7 @@
39#include <linux/module.h> 39#include <linux/module.h>
40#include <linux/pagemap.h> 40#include <linux/pagemap.h>
41#include <linux/platform_device.h> 41#include <linux/platform_device.h>
42#include <linux/pm_qos.h>
42#include <linux/pm_runtime.h> 43#include <linux/pm_runtime.h>
43#include <linux/scatterlist.h> 44#include <linux/scatterlist.h>
44#include <linux/spinlock.h> 45#include <linux/spinlock.h>
@@ -955,6 +956,8 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
955 956
956 mmc_add_host(mmc); 957 mmc_add_host(mmc);
957 958
959 dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
960
958 /* Unmask the IRQs we want to know about */ 961 /* Unmask the IRQs we want to know about */
959 if (!_host->chan_rx) 962 if (!_host->chan_rx)
960 irq_mask |= TMIO_MASK_READOP; 963 irq_mask |= TMIO_MASK_READOP;
@@ -993,6 +996,8 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
993 || host->mmc->caps & MMC_CAP_NONREMOVABLE) 996 || host->mmc->caps & MMC_CAP_NONREMOVABLE)
994 pm_runtime_get_sync(&pdev->dev); 997 pm_runtime_get_sync(&pdev->dev);
995 998
999 dev_pm_qos_hide_latency_limit(&pdev->dev);
1000
996 mmc_remove_host(host->mmc); 1001 mmc_remove_host(host->mmc);
997 cancel_work_sync(&host->done); 1002 cancel_work_sync(&host->done);
998 cancel_delayed_work_sync(&host->delayed_reset_work); 1003 cancel_delayed_work_sync(&host->delayed_reset_work);
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index ce4fa0831860..9e14ae6cd49c 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -129,9 +129,9 @@ static void do_suspend(void)
129 printk(KERN_DEBUG "suspending xenstore...\n"); 129 printk(KERN_DEBUG "suspending xenstore...\n");
130 xs_suspend(); 130 xs_suspend();
131 131
132 err = dpm_suspend_noirq(PMSG_FREEZE); 132 err = dpm_suspend_end(PMSG_FREEZE);
133 if (err) { 133 if (err) {
134 printk(KERN_ERR "dpm_suspend_noirq failed: %d\n", err); 134 printk(KERN_ERR "dpm_suspend_end failed: %d\n", err);
135 goto out_resume; 135 goto out_resume;
136 } 136 }
137 137
@@ -149,7 +149,7 @@ static void do_suspend(void)
149 149
150 err = stop_machine(xen_suspend, &si, cpumask_of(0)); 150 err = stop_machine(xen_suspend, &si, cpumask_of(0));
151 151
152 dpm_resume_noirq(si.cancelled ? PMSG_THAW : PMSG_RESTORE); 152 dpm_resume_start(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
153 153
154 if (err) { 154 if (err) {
155 printk(KERN_ERR "failed to start xen_suspend: %d\n", err); 155 printk(KERN_ERR "failed to start xen_suspend: %d\n", err);
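
The xen rename tracks the new suspend phases this series introduces: dpm_suspend_end() is understood to run the new "late" callbacks and then the noirq ones that dpm_suspend_noirq() used to cover alone, with dpm_resume_start() as its mirror. A hedged sketch of a driver opting into the new phases (assuming the dev_pm_ops fields added by this series):

#include <linux/pm.h>

static int foo_suspend_late(struct device *dev)
{
	/* Runs after regular suspend, before the noirq phase. */
	return 0;
}

static int foo_resume_early(struct device *dev)
{
	/* Mirror image: after noirq resume, before regular resume. */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend_late	= foo_suspend_late,
	.resume_early	= foo_resume_early,
};
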