author		Rafael J. Wysocki <rjw@sisk.pl>	2011-05-17 17:23:46 -0400
committer	Rafael J. Wysocki <rjw@sisk.pl>	2011-05-17 17:23:46 -0400
commit		290c748725c170ed9a02522959ae67f528eefe98 (patch)
tree		a920190b75c7e054af24d850e79cc54f5cf53263 /drivers/base
parent		2d2a9163bd4f3ba301f8138c32e4790edc30156c (diff)
parent		72874daa5e9064c4e8d689e6a04b1e96f687f872 (diff)
Merge branch 'power-domains' into for-linus
* power-domains:
  PM: Fix build issue in clock_ops.c for CONFIG_PM_RUNTIME unset
  PM: Revert "driver core: platform_bus: allow runtime override of dev_pm_ops"
  OMAP1 / PM: Use generic clock manipulation routines for runtime PM
  PM / Runtime: Generic clock manipulation routines for runtime PM (v6)
  PM / Runtime: Add subsystem data field to struct dev_pm_info
  OMAP2+ / PM: move runtime PM implementation to use device power domains
  PM / Platform: Use generic runtime PM callbacks directly
  shmobile: Use power domains for platform runtime PM
  PM: Export platform bus type's default PM callbacks
  PM: Make power domain callbacks take precedence over subsystem ones
Diffstat (limited to 'drivers/base')
-rw-r--r--	drivers/base/platform.c		138
-rw-r--r--	drivers/base/power/Makefile	  1
-rw-r--r--	drivers/base/power/clock_ops.c	431
-rw-r--r--	drivers/base/power/main.c	 64
-rw-r--r--	drivers/base/power/runtime.c	 29
5 files changed, 494 insertions(+), 169 deletions(-)
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 9e0e4fc24c46..48425f183029 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -667,7 +667,7 @@ static int platform_legacy_resume(struct device *dev)
 	return ret;
 }
 
-static int platform_pm_prepare(struct device *dev)
+int platform_pm_prepare(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -678,7 +678,7 @@ static int platform_pm_prepare(struct device *dev)
 	return ret;
 }
 
-static void platform_pm_complete(struct device *dev)
+void platform_pm_complete(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 
@@ -686,16 +686,11 @@ static void platform_pm_complete(struct device *dev)
 		drv->pm->complete(dev);
 }
 
-#else /* !CONFIG_PM_SLEEP */
-
-#define platform_pm_prepare NULL
-#define platform_pm_complete NULL
-
-#endif /* !CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_SUSPEND
 
-int __weak platform_pm_suspend(struct device *dev)
+int platform_pm_suspend(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -713,7 +708,7 @@ int __weak platform_pm_suspend(struct device *dev)
 	return ret;
 }
 
-int __weak platform_pm_suspend_noirq(struct device *dev)
+int platform_pm_suspend_noirq(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -729,7 +724,7 @@ int __weak platform_pm_suspend_noirq(struct device *dev)
 	return ret;
 }
 
-int __weak platform_pm_resume(struct device *dev)
+int platform_pm_resume(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -747,7 +742,7 @@ int __weak platform_pm_resume(struct device *dev)
 	return ret;
 }
 
-int __weak platform_pm_resume_noirq(struct device *dev)
+int platform_pm_resume_noirq(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -763,18 +758,11 @@ int __weak platform_pm_resume_noirq(struct device *dev)
 	return ret;
 }
 
-#else /* !CONFIG_SUSPEND */
-
-#define platform_pm_suspend NULL
-#define platform_pm_resume NULL
-#define platform_pm_suspend_noirq NULL
-#define platform_pm_resume_noirq NULL
-
-#endif /* !CONFIG_SUSPEND */
+#endif /* CONFIG_SUSPEND */
 
 #ifdef CONFIG_HIBERNATE_CALLBACKS
 
-static int platform_pm_freeze(struct device *dev)
+int platform_pm_freeze(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -792,7 +780,7 @@ static int platform_pm_freeze(struct device *dev)
 	return ret;
 }
 
-static int platform_pm_freeze_noirq(struct device *dev)
+int platform_pm_freeze_noirq(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -808,7 +796,7 @@ static int platform_pm_freeze_noirq(struct device *dev)
 	return ret;
 }
 
-static int platform_pm_thaw(struct device *dev)
+int platform_pm_thaw(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -826,7 +814,7 @@ static int platform_pm_thaw(struct device *dev)
 	return ret;
 }
 
-static int platform_pm_thaw_noirq(struct device *dev)
+int platform_pm_thaw_noirq(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -842,7 +830,7 @@ static int platform_pm_thaw_noirq(struct device *dev)
 	return ret;
 }
 
-static int platform_pm_poweroff(struct device *dev)
+int platform_pm_poweroff(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -860,7 +848,7 @@ static int platform_pm_poweroff(struct device *dev)
 	return ret;
 }
 
-static int platform_pm_poweroff_noirq(struct device *dev)
+int platform_pm_poweroff_noirq(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -876,7 +864,7 @@ static int platform_pm_poweroff_noirq(struct device *dev)
 	return ret;
 }
 
-static int platform_pm_restore(struct device *dev)
+int platform_pm_restore(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -894,7 +882,7 @@ static int platform_pm_restore(struct device *dev)
 	return ret;
 }
 
-static int platform_pm_restore_noirq(struct device *dev)
+int platform_pm_restore_noirq(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -910,62 +898,13 @@ static int platform_pm_restore_noirq(struct device *dev)
 	return ret;
 }
 
-#else /* !CONFIG_HIBERNATE_CALLBACKS */
-
-#define platform_pm_freeze NULL
-#define platform_pm_thaw NULL
-#define platform_pm_poweroff NULL
-#define platform_pm_restore NULL
-#define platform_pm_freeze_noirq NULL
-#define platform_pm_thaw_noirq NULL
-#define platform_pm_poweroff_noirq NULL
-#define platform_pm_restore_noirq NULL
-
-#endif /* !CONFIG_HIBERNATE_CALLBACKS */
-
-#ifdef CONFIG_PM_RUNTIME
-
-int __weak platform_pm_runtime_suspend(struct device *dev)
-{
-	return pm_generic_runtime_suspend(dev);
-};
-
-int __weak platform_pm_runtime_resume(struct device *dev)
-{
-	return pm_generic_runtime_resume(dev);
-};
-
-int __weak platform_pm_runtime_idle(struct device *dev)
-{
-	return pm_generic_runtime_idle(dev);
-};
-
-#else /* !CONFIG_PM_RUNTIME */
-
-#define platform_pm_runtime_suspend NULL
-#define platform_pm_runtime_resume NULL
-#define platform_pm_runtime_idle NULL
-
-#endif /* !CONFIG_PM_RUNTIME */
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
 
 static const struct dev_pm_ops platform_dev_pm_ops = {
-	.prepare = platform_pm_prepare,
-	.complete = platform_pm_complete,
-	.suspend = platform_pm_suspend,
-	.resume = platform_pm_resume,
-	.freeze = platform_pm_freeze,
-	.thaw = platform_pm_thaw,
-	.poweroff = platform_pm_poweroff,
-	.restore = platform_pm_restore,
-	.suspend_noirq = platform_pm_suspend_noirq,
-	.resume_noirq = platform_pm_resume_noirq,
-	.freeze_noirq = platform_pm_freeze_noirq,
-	.thaw_noirq = platform_pm_thaw_noirq,
-	.poweroff_noirq = platform_pm_poweroff_noirq,
-	.restore_noirq = platform_pm_restore_noirq,
-	.runtime_suspend = platform_pm_runtime_suspend,
-	.runtime_resume = platform_pm_runtime_resume,
-	.runtime_idle = platform_pm_runtime_idle,
+	.runtime_suspend = pm_generic_runtime_suspend,
+	.runtime_resume = pm_generic_runtime_resume,
+	.runtime_idle = pm_generic_runtime_idle,
+	USE_PLATFORM_PM_SLEEP_OPS
 };
 
 struct bus_type platform_bus_type = {
@@ -977,41 +916,6 @@ struct bus_type platform_bus_type = {
 };
 EXPORT_SYMBOL_GPL(platform_bus_type);
 
-/**
- * platform_bus_get_pm_ops() - return pointer to busses dev_pm_ops
- *
- * This function can be used by platform code to get the current
- * set of dev_pm_ops functions used by the platform_bus_type.
- */
-const struct dev_pm_ops * __init platform_bus_get_pm_ops(void)
-{
-	return platform_bus_type.pm;
-}
-
-/**
- * platform_bus_set_pm_ops() - update dev_pm_ops for the platform_bus_type
- *
- * @pm: pointer to new dev_pm_ops struct to be used for platform_bus_type
- *
- * Platform code can override the dev_pm_ops methods of
- * platform_bus_type by using this function. It is expected that
- * platform code will first do a platform_bus_get_pm_ops(), then
- * kmemdup it, then customize selected methods and pass a pointer to
- * the new struct dev_pm_ops to this function.
- *
- * Since platform-specific code is customizing methods for *all*
- * devices (not just platform-specific devices) it is expected that
- * any custom overrides of these functions will keep existing behavior
- * and simply extend it. For example, any customization of the
- * runtime PM methods should continue to call the pm_generic_*
- * functions as the default ones do in addition to the
- * platform-specific behavior.
- */
-void __init platform_bus_set_pm_ops(const struct dev_pm_ops *pm)
-{
-	platform_bus_type.pm = pm;
-}
-
 int __init platform_bus_init(void)
 {
 	int error;
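
With this change the platform bus callbacks above (platform_pm_prepare(), platform_pm_suspend() and friends) are no longer static and no longer have NULL fallbacks, so architecture code can reuse them instead of duplicating them. Below is a minimal sketch of that pattern; it is not part of this commit, the my_domain_* names are made up, and it assumes the extern declarations for these callbacks that this series adds to <linux/platform_device.h>.

#include <linux/pm.h>
#include <linux/platform_device.h>

/* Hypothetical power domain that does platform-specific work and then falls
 * back to the default platform bus behaviour via the newly exported callbacks.
 */
static int my_domain_suspend(struct device *dev)
{
	/* platform-specific preparation would go here */
	return platform_pm_suspend(dev);	/* default platform handling */
}

static int my_domain_resume(struct device *dev)
{
	int ret = platform_pm_resume(dev);	/* default platform handling */

	/* platform-specific restore work would go here */
	return ret;
}

static struct dev_power_domain my_power_domain = {
	.ops = {
		.suspend = my_domain_suspend,
		.resume  = my_domain_resume,
	},
};
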
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 118c1b92a511..06a7073f9027 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
 obj-$(CONFIG_PM_RUNTIME) += runtime.o
 obj-$(CONFIG_PM_TRACE_RTC) += trace.o
 obj-$(CONFIG_PM_OPP) += opp.o
+obj-$(CONFIG_HAVE_CLK) += clock_ops.o
 
 ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
 ccflags-$(CONFIG_PM_VERBOSE) += -DDEBUG
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
new file mode 100644
index 000000000000..c0dd09df7be8
--- /dev/null
+++ b/drivers/base/power/clock_ops.c
@@ -0,0 +1,431 @@
+/*
+ * drivers/base/power/clock_ops.c - Generic clock manipulation PM callbacks
+ *
+ * Copyright (c) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+
+#ifdef CONFIG_PM_RUNTIME
+
+struct pm_runtime_clk_data {
+	struct list_head clock_list;
+	struct mutex lock;
+};
+
+enum pce_status {
+	PCE_STATUS_NONE = 0,
+	PCE_STATUS_ACQUIRED,
+	PCE_STATUS_ENABLED,
+	PCE_STATUS_ERROR,
+};
+
+struct pm_clock_entry {
+	struct list_head node;
+	char *con_id;
+	struct clk *clk;
+	enum pce_status status;
+};
+
+static struct pm_runtime_clk_data *__to_prd(struct device *dev)
+{
+	return dev ? dev->power.subsys_data : NULL;
+}
+
+/**
+ * pm_runtime_clk_add - Start using a device clock for runtime PM.
+ * @dev: Device whose clock is going to be used for runtime PM.
+ * @con_id: Connection ID of the clock.
+ *
+ * Add the clock represented by @con_id to the list of clocks used for
+ * the runtime PM of @dev.
+ */
+int pm_runtime_clk_add(struct device *dev, const char *con_id)
+{
+	struct pm_runtime_clk_data *prd = __to_prd(dev);
+	struct pm_clock_entry *ce;
+
+	if (!prd)
+		return -EINVAL;
+
+	ce = kzalloc(sizeof(*ce), GFP_KERNEL);
+	if (!ce) {
+		dev_err(dev, "Not enough memory for clock entry.\n");
+		return -ENOMEM;
+	}
+
+	if (con_id) {
+		ce->con_id = kstrdup(con_id, GFP_KERNEL);
+		if (!ce->con_id) {
+			dev_err(dev,
+				"Not enough memory for clock connection ID.\n");
+			kfree(ce);
+			return -ENOMEM;
+		}
+	}
+
+	mutex_lock(&prd->lock);
+	list_add_tail(&ce->node, &prd->clock_list);
+	mutex_unlock(&prd->lock);
+	return 0;
+}
+
+/**
+ * __pm_runtime_clk_remove - Destroy runtime PM clock entry.
+ * @ce: Runtime PM clock entry to destroy.
+ *
+ * This routine must be called under the mutex protecting the runtime PM list
+ * of clocks corresponding the the @ce's device.
+ */
+static void __pm_runtime_clk_remove(struct pm_clock_entry *ce)
+{
+	if (!ce)
+		return;
+
+	list_del(&ce->node);
+
+	if (ce->status < PCE_STATUS_ERROR) {
+		if (ce->status == PCE_STATUS_ENABLED)
+			clk_disable(ce->clk);
+
+		if (ce->status >= PCE_STATUS_ACQUIRED)
+			clk_put(ce->clk);
+	}
+
+	if (ce->con_id)
+		kfree(ce->con_id);
+
+	kfree(ce);
+}
+
+/**
+ * pm_runtime_clk_remove - Stop using a device clock for runtime PM.
+ * @dev: Device whose clock should not be used for runtime PM any more.
+ * @con_id: Connection ID of the clock.
+ *
+ * Remove the clock represented by @con_id from the list of clocks used for
+ * the runtime PM of @dev.
+ */
+void pm_runtime_clk_remove(struct device *dev, const char *con_id)
+{
+	struct pm_runtime_clk_data *prd = __to_prd(dev);
+	struct pm_clock_entry *ce;
+
+	if (!prd)
+		return;
+
+	mutex_lock(&prd->lock);
+
+	list_for_each_entry(ce, &prd->clock_list, node) {
+		if (!con_id && !ce->con_id) {
+			__pm_runtime_clk_remove(ce);
+			break;
+		} else if (!con_id || !ce->con_id) {
+			continue;
+		} else if (!strcmp(con_id, ce->con_id)) {
+			__pm_runtime_clk_remove(ce);
+			break;
+		}
+	}
+
+	mutex_unlock(&prd->lock);
+}
+
+/**
+ * pm_runtime_clk_init - Initialize a device's list of runtime PM clocks.
+ * @dev: Device to initialize the list of runtime PM clocks for.
+ *
+ * Allocate a struct pm_runtime_clk_data object, initialize its lock member and
+ * make the @dev's power.subsys_data field point to it.
+ */
+int pm_runtime_clk_init(struct device *dev)
+{
+	struct pm_runtime_clk_data *prd;
+
+	prd = kzalloc(sizeof(*prd), GFP_KERNEL);
+	if (!prd) {
+		dev_err(dev, "Not enough memory fo runtime PM data.\n");
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&prd->clock_list);
+	mutex_init(&prd->lock);
+	dev->power.subsys_data = prd;
+	return 0;
+}
+
+/**
+ * pm_runtime_clk_destroy - Destroy a device's list of runtime PM clocks.
+ * @dev: Device to destroy the list of runtime PM clocks for.
+ *
+ * Clear the @dev's power.subsys_data field, remove the list of clock entries
+ * from the struct pm_runtime_clk_data object pointed to by it before and free
+ * that object.
+ */
+void pm_runtime_clk_destroy(struct device *dev)
+{
+	struct pm_runtime_clk_data *prd = __to_prd(dev);
+	struct pm_clock_entry *ce, *c;
+
+	if (!prd)
+		return;
+
+	dev->power.subsys_data = NULL;
+
+	mutex_lock(&prd->lock);
+
+	list_for_each_entry_safe_reverse(ce, c, &prd->clock_list, node)
+		__pm_runtime_clk_remove(ce);
+
+	mutex_unlock(&prd->lock);
+
+	kfree(prd);
+}
+
+/**
+ * pm_runtime_clk_acquire - Acquire a device clock.
+ * @dev: Device whose clock is to be acquired.
+ * @con_id: Connection ID of the clock.
+ */
+static void pm_runtime_clk_acquire(struct device *dev,
+				   struct pm_clock_entry *ce)
+{
+	ce->clk = clk_get(dev, ce->con_id);
+	if (IS_ERR(ce->clk)) {
+		ce->status = PCE_STATUS_ERROR;
+	} else {
+		ce->status = PCE_STATUS_ACQUIRED;
+		dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id);
+	}
+}
+
+/**
+ * pm_runtime_clk_suspend - Disable clocks in a device's runtime PM clock list.
+ * @dev: Device to disable the clocks for.
+ */
+int pm_runtime_clk_suspend(struct device *dev)
+{
+	struct pm_runtime_clk_data *prd = __to_prd(dev);
+	struct pm_clock_entry *ce;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	if (!prd)
+		return 0;
+
+	mutex_lock(&prd->lock);
+
+	list_for_each_entry_reverse(ce, &prd->clock_list, node) {
+		if (ce->status == PCE_STATUS_NONE)
+			pm_runtime_clk_acquire(dev, ce);
+
+		if (ce->status < PCE_STATUS_ERROR) {
+			clk_disable(ce->clk);
+			ce->status = PCE_STATUS_ACQUIRED;
+		}
+	}
+
+	mutex_unlock(&prd->lock);
+
+	return 0;
+}
+
+/**
+ * pm_runtime_clk_resume - Enable clocks in a device's runtime PM clock list.
+ * @dev: Device to enable the clocks for.
+ */
+int pm_runtime_clk_resume(struct device *dev)
+{
+	struct pm_runtime_clk_data *prd = __to_prd(dev);
+	struct pm_clock_entry *ce;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	if (!prd)
+		return 0;
+
+	mutex_lock(&prd->lock);
+
+	list_for_each_entry(ce, &prd->clock_list, node) {
+		if (ce->status == PCE_STATUS_NONE)
+			pm_runtime_clk_acquire(dev, ce);
+
+		if (ce->status < PCE_STATUS_ERROR) {
+			clk_enable(ce->clk);
+			ce->status = PCE_STATUS_ENABLED;
+		}
+	}
+
+	mutex_unlock(&prd->lock);
+
+	return 0;
+}
+
+/**
+ * pm_runtime_clk_notify - Notify routine for device addition and removal.
+ * @nb: Notifier block object this function is a member of.
+ * @action: Operation being carried out by the caller.
+ * @data: Device the routine is being run for.
+ *
+ * For this function to work, @nb must be a member of an object of type
+ * struct pm_clk_notifier_block containing all of the requisite data.
+ * Specifically, the pwr_domain member of that object is copied to the device's
+ * pwr_domain field and its con_ids member is used to populate the device's list
+ * of runtime PM clocks, depending on @action.
+ *
+ * If the device's pwr_domain field is already populated with a value different
+ * from the one stored in the struct pm_clk_notifier_block object, the function
+ * does nothing.
+ */
+static int pm_runtime_clk_notify(struct notifier_block *nb,
+				 unsigned long action, void *data)
+{
+	struct pm_clk_notifier_block *clknb;
+	struct device *dev = data;
+	char *con_id;
+	int error;
+
+	dev_dbg(dev, "%s() %ld\n", __func__, action);
+
+	clknb = container_of(nb, struct pm_clk_notifier_block, nb);
+
+	switch (action) {
+	case BUS_NOTIFY_ADD_DEVICE:
+		if (dev->pwr_domain)
+			break;
+
+		error = pm_runtime_clk_init(dev);
+		if (error)
+			break;
+
+		dev->pwr_domain = clknb->pwr_domain;
+		if (clknb->con_ids[0]) {
+			for (con_id = clknb->con_ids[0]; *con_id; con_id++)
+				pm_runtime_clk_add(dev, con_id);
+		} else {
+			pm_runtime_clk_add(dev, NULL);
+		}
+
+		break;
+	case BUS_NOTIFY_DEL_DEVICE:
+		if (dev->pwr_domain != clknb->pwr_domain)
+			break;
+
+		dev->pwr_domain = NULL;
+		pm_runtime_clk_destroy(dev);
+		break;
+	}
+
+	return 0;
+}
+
+#else /* !CONFIG_PM_RUNTIME */
+
+/**
+ * enable_clock - Enable a device clock.
+ * @dev: Device whose clock is to be enabled.
+ * @con_id: Connection ID of the clock.
+ */
+static void enable_clock(struct device *dev, const char *con_id)
+{
+	struct clk *clk;
+
+	clk = clk_get(dev, con_id);
+	if (!IS_ERR(clk)) {
+		clk_enable(clk);
+		clk_put(clk);
+		dev_info(dev, "Runtime PM disabled, clock forced on.\n");
+	}
+}
+
+/**
+ * disable_clock - Disable a device clock.
+ * @dev: Device whose clock is to be disabled.
+ * @con_id: Connection ID of the clock.
+ */
+static void disable_clock(struct device *dev, const char *con_id)
+{
+	struct clk *clk;
+
+	clk = clk_get(dev, con_id);
+	if (!IS_ERR(clk)) {
+		clk_disable(clk);
+		clk_put(clk);
+		dev_info(dev, "Runtime PM disabled, clock forced off.\n");
+	}
+}
+
+/**
+ * pm_runtime_clk_notify - Notify routine for device addition and removal.
+ * @nb: Notifier block object this function is a member of.
+ * @action: Operation being carried out by the caller.
+ * @data: Device the routine is being run for.
+ *
+ * For this function to work, @nb must be a member of an object of type
+ * struct pm_clk_notifier_block containing all of the requisite data.
+ * Specifically, the con_ids member of that object is used to enable or disable
+ * the device's clocks, depending on @action.
+ */
+static int pm_runtime_clk_notify(struct notifier_block *nb,
+				 unsigned long action, void *data)
+{
+	struct pm_clk_notifier_block *clknb;
+	struct device *dev = data;
+	char *con_id;
+
+	dev_dbg(dev, "%s() %ld\n", __func__, action);
+
+	clknb = container_of(nb, struct pm_clk_notifier_block, nb);
+
+	switch (action) {
+	case BUS_NOTIFY_ADD_DEVICE:
+		if (clknb->con_ids[0]) {
+			for (con_id = clknb->con_ids[0]; *con_id; con_id++)
+				enable_clock(dev, con_id);
+		} else {
+			enable_clock(dev, NULL);
+		}
+		break;
+	case BUS_NOTIFY_DEL_DEVICE:
+		if (clknb->con_ids[0]) {
+			for (con_id = clknb->con_ids[0]; *con_id; con_id++)
+				disable_clock(dev, con_id);
+		} else {
+			disable_clock(dev, NULL);
+		}
+		break;
+	}
+
+	return 0;
+}
+
+#endif /* !CONFIG_PM_RUNTIME */
+
+/**
+ * pm_runtime_clk_add_notifier - Add bus type notifier for runtime PM clocks.
+ * @bus: Bus type to add the notifier to.
+ * @clknb: Notifier to be added to the given bus type.
+ *
+ * The nb member of @clknb is not expected to be initialized and its
+ * notifier_call member will be replaced with pm_runtime_clk_notify(). However,
+ * the remaining members of @clknb should be populated prior to calling this
+ * routine.
+ */
+void pm_runtime_clk_add_notifier(struct bus_type *bus,
+				 struct pm_clk_notifier_block *clknb)
+{
+	if (!bus || !clknb)
+		return;
+
+	clknb->nb.notifier_call = pm_runtime_clk_notify;
+	bus_register_notifier(bus, &clknb->nb);
+}
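
Taken together with the notifier above, a platform can hand every device on a bus a power domain whose runtime PM callbacks are the generic clock routines. The sketch below is illustrative only and not part of this commit: the pm_clk_notifier_block layout is the one consumed by pm_runtime_clk_notify() above, pm_generic_runtime_idle is the helper already used by the platform bus changes earlier in this merge, and my_pm_runtime_init is a made-up initcall name.

#include <linux/init.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>

/* Hypothetical wiring: every device added to the platform bus gets this
 * power domain, so its clocks are gated by pm_runtime_clk_suspend() and
 * reenabled by pm_runtime_clk_resume() as the device runtime suspends/resumes.
 */
static struct dev_power_domain default_power_domain = {
	.ops = {
		.runtime_suspend = pm_runtime_clk_suspend,
		.runtime_resume = pm_runtime_clk_resume,
		.runtime_idle = pm_generic_runtime_idle,
	},
};

static struct pm_clk_notifier_block platform_bus_notifier = {
	.pwr_domain = &default_power_domain,
	.con_ids = { NULL, },	/* a NULL entry means "manage the default clock" */
};

static int __init my_pm_runtime_init(void)
{
	pm_runtime_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
	return 0;
}
core_initcall(my_pm_runtime_init);
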
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index abe3ab709e87..3b354560f306 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -426,10 +426,8 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
 
 	if (dev->pwr_domain) {
 		pm_dev_dbg(dev, state, "EARLY power domain ");
-		pm_noirq_op(dev, &dev->pwr_domain->ops, state);
-	}
-
-	if (dev->type && dev->type->pm) {
+		error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
+	} else if (dev->type && dev->type->pm) {
 		pm_dev_dbg(dev, state, "EARLY type ");
 		error = pm_noirq_op(dev, dev->type->pm, state);
 	} else if (dev->class && dev->class->pm) {
@@ -517,7 +515,8 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
 
 	if (dev->pwr_domain) {
 		pm_dev_dbg(dev, state, "power domain ");
-		pm_op(dev, &dev->pwr_domain->ops, state);
+		error = pm_op(dev, &dev->pwr_domain->ops, state);
+		goto End;
 	}
 
 	if (dev->type && dev->type->pm) {
@@ -629,12 +628,11 @@ static void device_complete(struct device *dev, pm_message_t state)
 {
 	device_lock(dev);
 
-	if (dev->pwr_domain && dev->pwr_domain->ops.complete) {
+	if (dev->pwr_domain) {
 		pm_dev_dbg(dev, state, "completing power domain ");
-		dev->pwr_domain->ops.complete(dev);
-	}
-
-	if (dev->type && dev->type->pm) {
+		if (dev->pwr_domain->ops.complete)
+			dev->pwr_domain->ops.complete(dev);
+	} else if (dev->type && dev->type->pm) {
 		pm_dev_dbg(dev, state, "completing type ");
 		if (dev->type->pm->complete)
 			dev->type->pm->complete(dev);
@@ -732,7 +730,12 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
 {
 	int error;
 
-	if (dev->type && dev->type->pm) {
+	if (dev->pwr_domain) {
+		pm_dev_dbg(dev, state, "LATE power domain ");
+		error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
+		if (error)
+			return error;
+	} else if (dev->type && dev->type->pm) {
 		pm_dev_dbg(dev, state, "LATE type ");
 		error = pm_noirq_op(dev, dev->type->pm, state);
 		if (error)
@@ -749,11 +752,6 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
 		return error;
 	}
 
-	if (dev->pwr_domain) {
-		pm_dev_dbg(dev, state, "LATE power domain ");
-		pm_noirq_op(dev, &dev->pwr_domain->ops, state);
-	}
-
 	return 0;
 }
 
@@ -841,21 +839,27 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 		goto End;
 	}
 
+	if (dev->pwr_domain) {
+		pm_dev_dbg(dev, state, "power domain ");
+		error = pm_op(dev, &dev->pwr_domain->ops, state);
+		goto End;
+	}
+
 	if (dev->type && dev->type->pm) {
 		pm_dev_dbg(dev, state, "type ");
 		error = pm_op(dev, dev->type->pm, state);
-		goto Domain;
+		goto End;
 	}
 
 	if (dev->class) {
 		if (dev->class->pm) {
 			pm_dev_dbg(dev, state, "class ");
 			error = pm_op(dev, dev->class->pm, state);
-			goto Domain;
+			goto End;
 		} else if (dev->class->suspend) {
 			pm_dev_dbg(dev, state, "legacy class ");
 			error = legacy_suspend(dev, state, dev->class->suspend);
-			goto Domain;
+			goto End;
 		}
 	}
 
@@ -869,12 +873,6 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 		}
 	}
 
- Domain:
-	if (!error && dev->pwr_domain) {
-		pm_dev_dbg(dev, state, "power domain ");
-		pm_op(dev, &dev->pwr_domain->ops, state);
-	}
-
  End:
 	device_unlock(dev);
 	complete_all(&dev->power.completion);
@@ -965,7 +963,14 @@ static int device_prepare(struct device *dev, pm_message_t state)
 
 	device_lock(dev);
 
-	if (dev->type && dev->type->pm) {
+	if (dev->pwr_domain) {
+		pm_dev_dbg(dev, state, "preparing power domain ");
+		if (dev->pwr_domain->ops.prepare)
+			error = dev->pwr_domain->ops.prepare(dev);
+		suspend_report_result(dev->pwr_domain->ops.prepare, error);
+		if (error)
+			goto End;
+	} else if (dev->type && dev->type->pm) {
 		pm_dev_dbg(dev, state, "preparing type ");
 		if (dev->type->pm->prepare)
 			error = dev->type->pm->prepare(dev);
@@ -984,13 +989,6 @@ static int device_prepare(struct device *dev, pm_message_t state)
 		if (dev->bus->pm->prepare)
 			error = dev->bus->pm->prepare(dev);
 		suspend_report_result(dev->bus->pm->prepare, error);
-		if (error)
-			goto End;
-	}
-
-	if (dev->pwr_domain && dev->pwr_domain->ops.prepare) {
-		pm_dev_dbg(dev, state, "preparing power domain ");
-		dev->pwr_domain->ops.prepare(dev);
 	}
 
  End:
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 3172c60d23a9..0d4587b15c55 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -168,7 +168,6 @@ static int rpm_check_suspend_allowed(struct device *dev)
 static int rpm_idle(struct device *dev, int rpmflags)
 {
 	int (*callback)(struct device *);
-	int (*domain_callback)(struct device *);
 	int retval;
 
 	retval = rpm_check_suspend_allowed(dev);
@@ -214,7 +213,9 @@ static int rpm_idle(struct device *dev, int rpmflags)
 
 	dev->power.idle_notification = true;
 
-	if (dev->type && dev->type->pm)
+	if (dev->pwr_domain)
+		callback = dev->pwr_domain->ops.runtime_idle;
+	else if (dev->type && dev->type->pm)
 		callback = dev->type->pm->runtime_idle;
 	else if (dev->class && dev->class->pm)
 		callback = dev->class->pm->runtime_idle;
@@ -223,19 +224,10 @@ static int rpm_idle(struct device *dev, int rpmflags)
 	else
 		callback = NULL;
 
-	if (dev->pwr_domain)
-		domain_callback = dev->pwr_domain->ops.runtime_idle;
-	else
-		domain_callback = NULL;
-
-	if (callback || domain_callback) {
+	if (callback) {
 		spin_unlock_irq(&dev->power.lock);
 
-		if (domain_callback)
-			retval = domain_callback(dev);
-
-		if (!retval && callback)
-			callback(dev);
+		callback(dev);
 
 		spin_lock_irq(&dev->power.lock);
 	}
@@ -382,7 +374,9 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 
 	__update_runtime_status(dev, RPM_SUSPENDING);
 
-	if (dev->type && dev->type->pm)
+	if (dev->pwr_domain)
+		callback = dev->pwr_domain->ops.runtime_suspend;
+	else if (dev->type && dev->type->pm)
 		callback = dev->type->pm->runtime_suspend;
 	else if (dev->class && dev->class->pm)
 		callback = dev->class->pm->runtime_suspend;
@@ -400,8 +394,6 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 		else
 			pm_runtime_cancel_pending(dev);
 	} else {
-		if (dev->pwr_domain)
-			rpm_callback(dev->pwr_domain->ops.runtime_suspend, dev);
  no_callback:
 		__update_runtime_status(dev, RPM_SUSPENDED);
 		pm_runtime_deactivate_timer(dev);
@@ -582,9 +574,8 @@ static int rpm_resume(struct device *dev, int rpmflags)
 	__update_runtime_status(dev, RPM_RESUMING);
 
 	if (dev->pwr_domain)
-		rpm_callback(dev->pwr_domain->ops.runtime_resume, dev);
-
-	if (dev->type && dev->type->pm)
+		callback = dev->pwr_domain->ops.runtime_resume;
+	else if (dev->type && dev->type->pm)
 		callback = dev->type->pm->runtime_resume;
 	else if (dev->class && dev->class->pm)
 		callback = dev->class->pm->runtime_resume;