Diffstat (limited to 'drivers/base/power')
-rw-r--r--	drivers/base/power/Makefile	|   1
-rw-r--r--	drivers/base/power/clock_ops.c	| 431
-rw-r--r--	drivers/base/power/main.c	|  64
-rw-r--r--	drivers/base/power/runtime.c	|  29
4 files changed, 473 insertions(+), 52 deletions(-)
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 118c1b92a511..06a7073f9027 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
 obj-$(CONFIG_PM_RUNTIME)	+= runtime.o
 obj-$(CONFIG_PM_TRACE_RTC)	+= trace.o
 obj-$(CONFIG_PM_OPP)	+= opp.o
+obj-$(CONFIG_HAVE_CLK)	+= clock_ops.o
 
 ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
 ccflags-$(CONFIG_PM_VERBOSE)   += -DDEBUG
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
new file mode 100644
index 000000000000..c0dd09df7be8
--- /dev/null
+++ b/drivers/base/power/clock_ops.c
@@ -0,0 +1,431 @@
+/*
+ * drivers/base/power/clock_ops.c - Generic clock manipulation PM callbacks
+ *
+ * Copyright (c) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+
+#ifdef CONFIG_PM_RUNTIME
+
+struct pm_runtime_clk_data {
+	struct list_head clock_list;
+	struct mutex lock;
+};
+
+enum pce_status {
+	PCE_STATUS_NONE = 0,
+	PCE_STATUS_ACQUIRED,
+	PCE_STATUS_ENABLED,
+	PCE_STATUS_ERROR,
+};
+
+struct pm_clock_entry {
+	struct list_head node;
+	char *con_id;
+	struct clk *clk;
+	enum pce_status status;
+};
+
+static struct pm_runtime_clk_data *__to_prd(struct device *dev)
+{
+	return dev ? dev->power.subsys_data : NULL;
+}
+
+/**
+ * pm_runtime_clk_add - Start using a device clock for runtime PM.
+ * @dev: Device whose clock is going to be used for runtime PM.
+ * @con_id: Connection ID of the clock.
+ *
+ * Add the clock represented by @con_id to the list of clocks used for
+ * the runtime PM of @dev.
+ */
+int pm_runtime_clk_add(struct device *dev, const char *con_id)
+{
+	struct pm_runtime_clk_data *prd = __to_prd(dev);
+	struct pm_clock_entry *ce;
+
+	if (!prd)
+		return -EINVAL;
+
+	ce = kzalloc(sizeof(*ce), GFP_KERNEL);
+	if (!ce) {
+		dev_err(dev, "Not enough memory for clock entry.\n");
+		return -ENOMEM;
+	}
+
+	if (con_id) {
+		ce->con_id = kstrdup(con_id, GFP_KERNEL);
+		if (!ce->con_id) {
+			dev_err(dev,
+				"Not enough memory for clock connection ID.\n");
+			kfree(ce);
+			return -ENOMEM;
+		}
+	}
+
+	mutex_lock(&prd->lock);
+	list_add_tail(&ce->node, &prd->clock_list);
+	mutex_unlock(&prd->lock);
+	return 0;
+}
+
+/**
+ * __pm_runtime_clk_remove - Destroy runtime PM clock entry.
+ * @ce: Runtime PM clock entry to destroy.
+ *
+ * This routine must be called under the mutex protecting the runtime PM list
+ * of clocks corresponding to the @ce's device.
+ */
+static void __pm_runtime_clk_remove(struct pm_clock_entry *ce)
+{
+	if (!ce)
+		return;
+
+	list_del(&ce->node);
+
+	if (ce->status < PCE_STATUS_ERROR) {
+		if (ce->status == PCE_STATUS_ENABLED)
+			clk_disable(ce->clk);
+
+		if (ce->status >= PCE_STATUS_ACQUIRED)
+			clk_put(ce->clk);
+	}
+
+	if (ce->con_id)
+		kfree(ce->con_id);
+
+	kfree(ce);
+}
+
+/**
+ * pm_runtime_clk_remove - Stop using a device clock for runtime PM.
+ * @dev: Device whose clock should not be used for runtime PM any more.
+ * @con_id: Connection ID of the clock.
+ *
+ * Remove the clock represented by @con_id from the list of clocks used for
+ * the runtime PM of @dev.
+ */
+void pm_runtime_clk_remove(struct device *dev, const char *con_id)
+{
+	struct pm_runtime_clk_data *prd = __to_prd(dev);
+	struct pm_clock_entry *ce;
+
+	if (!prd)
+		return;
+
+	mutex_lock(&prd->lock);
+
+	list_for_each_entry(ce, &prd->clock_list, node) {
+		if (!con_id && !ce->con_id) {
+			__pm_runtime_clk_remove(ce);
+			break;
+		} else if (!con_id || !ce->con_id) {
+			continue;
+		} else if (!strcmp(con_id, ce->con_id)) {
+			__pm_runtime_clk_remove(ce);
+			break;
+		}
+	}
+
+	mutex_unlock(&prd->lock);
+}
+
+/**
+ * pm_runtime_clk_init - Initialize a device's list of runtime PM clocks.
+ * @dev: Device to initialize the list of runtime PM clocks for.
+ *
+ * Allocate a struct pm_runtime_clk_data object, initialize its lock member and
+ * make the @dev's power.subsys_data field point to it.
+ */
+int pm_runtime_clk_init(struct device *dev)
+{
+	struct pm_runtime_clk_data *prd;
+
+	prd = kzalloc(sizeof(*prd), GFP_KERNEL);
+	if (!prd) {
+		dev_err(dev, "Not enough memory for runtime PM data.\n");
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&prd->clock_list);
+	mutex_init(&prd->lock);
+	dev->power.subsys_data = prd;
+	return 0;
+}
+
+/**
+ * pm_runtime_clk_destroy - Destroy a device's list of runtime PM clocks.
+ * @dev: Device to destroy the list of runtime PM clocks for.
+ *
+ * Clear the @dev's power.subsys_data field, remove the list of clock entries
+ * from the struct pm_runtime_clk_data object it used to point to, and free
+ * that object.
+ */
+void pm_runtime_clk_destroy(struct device *dev)
+{
+	struct pm_runtime_clk_data *prd = __to_prd(dev);
+	struct pm_clock_entry *ce, *c;
+
+	if (!prd)
+		return;
+
+	dev->power.subsys_data = NULL;
+
+	mutex_lock(&prd->lock);
+
+	list_for_each_entry_safe_reverse(ce, c, &prd->clock_list, node)
+		__pm_runtime_clk_remove(ce);
+
+	mutex_unlock(&prd->lock);
+
+	kfree(prd);
+}
+
+/**
+ * pm_runtime_clk_acquire - Acquire a device clock.
+ * @dev: Device whose clock is to be acquired.
+ * @ce: Runtime PM clock entry describing the clock to acquire.
+ */
+static void pm_runtime_clk_acquire(struct device *dev,
+				   struct pm_clock_entry *ce)
+{
+	ce->clk = clk_get(dev, ce->con_id);
+	if (IS_ERR(ce->clk)) {
+		ce->status = PCE_STATUS_ERROR;
+	} else {
+		ce->status = PCE_STATUS_ACQUIRED;
+		dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id);
+	}
+}
+
+/**
+ * pm_runtime_clk_suspend - Disable clocks in a device's runtime PM clock list.
+ * @dev: Device to disable the clocks for.
+ */
+int pm_runtime_clk_suspend(struct device *dev)
+{
+	struct pm_runtime_clk_data *prd = __to_prd(dev);
+	struct pm_clock_entry *ce;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	if (!prd)
+		return 0;
+
+	mutex_lock(&prd->lock);
+
+	list_for_each_entry_reverse(ce, &prd->clock_list, node) {
+		if (ce->status == PCE_STATUS_NONE)
+			pm_runtime_clk_acquire(dev, ce);
+
+		if (ce->status < PCE_STATUS_ERROR) {
+			clk_disable(ce->clk);
+			ce->status = PCE_STATUS_ACQUIRED;
+		}
+	}
+
+	mutex_unlock(&prd->lock);
+
+	return 0;
+}
+
+/**
+ * pm_runtime_clk_resume - Enable clocks in a device's runtime PM clock list.
+ * @dev: Device to enable the clocks for.
+ */
+int pm_runtime_clk_resume(struct device *dev)
+{
+	struct pm_runtime_clk_data *prd = __to_prd(dev);
+	struct pm_clock_entry *ce;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	if (!prd)
+		return 0;
+
+	mutex_lock(&prd->lock);
+
+	list_for_each_entry(ce, &prd->clock_list, node) {
+		if (ce->status == PCE_STATUS_NONE)
+			pm_runtime_clk_acquire(dev, ce);
+
+		if (ce->status < PCE_STATUS_ERROR) {
+			clk_enable(ce->clk);
+			ce->status = PCE_STATUS_ENABLED;
+		}
+	}
+
+	mutex_unlock(&prd->lock);
+
+	return 0;
+}
+
+/**
+ * pm_runtime_clk_notify - Notify routine for device addition and removal.
+ * @nb: Notifier block object this function is a member of.
+ * @action: Operation being carried out by the caller.
+ * @data: Device the routine is being run for.
+ *
+ * For this function to work, @nb must be a member of an object of type
+ * struct pm_clk_notifier_block containing all of the requisite data.
+ * Specifically, the pwr_domain member of that object is copied to the device's
+ * pwr_domain field and its con_ids member is used to populate the device's list
+ * of runtime PM clocks, depending on @action.
+ *
+ * If the device's pwr_domain field is already populated with a value different
+ * from the one stored in the struct pm_clk_notifier_block object, the function
+ * does nothing.
+ */
+static int pm_runtime_clk_notify(struct notifier_block *nb,
+				 unsigned long action, void *data)
+{
+	struct pm_clk_notifier_block *clknb;
+	struct device *dev = data;
+	char **con_id;
+	int error;
+
+	dev_dbg(dev, "%s() %ld\n", __func__, action);
+
+	clknb = container_of(nb, struct pm_clk_notifier_block, nb);
+
+	switch (action) {
+	case BUS_NOTIFY_ADD_DEVICE:
+		if (dev->pwr_domain)
+			break;
+
+		error = pm_runtime_clk_init(dev);
+		if (error)
+			break;
+
+		dev->pwr_domain = clknb->pwr_domain;
+		if (clknb->con_ids[0]) {
+			for (con_id = clknb->con_ids; *con_id; con_id++)
+				pm_runtime_clk_add(dev, *con_id);
+		} else {
+			pm_runtime_clk_add(dev, NULL);
+		}
+
+		break;
+	case BUS_NOTIFY_DEL_DEVICE:
+		if (dev->pwr_domain != clknb->pwr_domain)
+			break;
+
+		dev->pwr_domain = NULL;
+		pm_runtime_clk_destroy(dev);
+		break;
+	}
+
+	return 0;
+}
+
+#else /* !CONFIG_PM_RUNTIME */
+
+/**
+ * enable_clock - Enable a device clock.
+ * @dev: Device whose clock is to be enabled.
+ * @con_id: Connection ID of the clock.
+ */
+static void enable_clock(struct device *dev, const char *con_id)
+{
+	struct clk *clk;
+
+	clk = clk_get(dev, con_id);
+	if (!IS_ERR(clk)) {
+		clk_enable(clk);
+		clk_put(clk);
+		dev_info(dev, "Runtime PM disabled, clock forced on.\n");
+	}
+}
+
+/**
+ * disable_clock - Disable a device clock.
+ * @dev: Device whose clock is to be disabled.
+ * @con_id: Connection ID of the clock.
+ */
+static void disable_clock(struct device *dev, const char *con_id)
+{
+	struct clk *clk;
+
+	clk = clk_get(dev, con_id);
+	if (!IS_ERR(clk)) {
+		clk_disable(clk);
+		clk_put(clk);
+		dev_info(dev, "Runtime PM disabled, clock forced off.\n");
+	}
+}
+
+/**
+ * pm_runtime_clk_notify - Notify routine for device addition and removal.
+ * @nb: Notifier block object this function is a member of.
+ * @action: Operation being carried out by the caller.
+ * @data: Device the routine is being run for.
+ *
+ * For this function to work, @nb must be a member of an object of type
+ * struct pm_clk_notifier_block containing all of the requisite data.
+ * Specifically, the con_ids member of that object is used to enable or disable
+ * the device's clocks, depending on @action.
+ */
+static int pm_runtime_clk_notify(struct notifier_block *nb,
+				 unsigned long action, void *data)
+{
+	struct pm_clk_notifier_block *clknb;
+	struct device *dev = data;
+	char **con_id;
+
+	dev_dbg(dev, "%s() %ld\n", __func__, action);
+
+	clknb = container_of(nb, struct pm_clk_notifier_block, nb);
+
+	switch (action) {
+	case BUS_NOTIFY_ADD_DEVICE:
+		if (clknb->con_ids[0]) {
+			for (con_id = clknb->con_ids; *con_id; con_id++)
+				enable_clock(dev, *con_id);
+		} else {
+			enable_clock(dev, NULL);
+		}
+		break;
+	case BUS_NOTIFY_DEL_DEVICE:
+		if (clknb->con_ids[0]) {
+			for (con_id = clknb->con_ids; *con_id; con_id++)
+				disable_clock(dev, *con_id);
+		} else {
+			disable_clock(dev, NULL);
+		}
+		break;
+	}
+
+	return 0;
+}
+
+#endif /* !CONFIG_PM_RUNTIME */
+
+/**
+ * pm_runtime_clk_add_notifier - Add bus type notifier for runtime PM clocks.
+ * @bus: Bus type to add the notifier to.
+ * @clknb: Notifier to be added to the given bus type.
+ *
+ * The nb member of @clknb is not expected to be initialized and its
+ * notifier_call member will be replaced with pm_runtime_clk_notify(). However,
+ * the remaining members of @clknb should be populated prior to calling this
+ * routine.
+ */
+void pm_runtime_clk_add_notifier(struct bus_type *bus,
+				 struct pm_clk_notifier_block *clknb)
+{
+	if (!bus || !clknb)
+		return;
+
+	clknb->nb.notifier_call = pm_runtime_clk_notify;
+	bus_register_notifier(bus, &clknb->nb);
+}
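
The helpers added above are meant to be wired up by bus or platform glue code rather than by individual drivers. A minimal sketch of such a caller follows; the bus type, clock connection IDs and symbol names are invented for illustration, and the struct dev_power_domain / struct pm_clk_notifier_block layouts are assumed to match the declarations that accompany this patch in <linux/pm.h> and <linux/pm_runtime.h>.

	/* Hypothetical glue: manage the "ick" and "fck" clocks of every device
	 * registered on my_bus_type through the runtime PM clock helpers.
	 */
	#include <linux/device.h>
	#include <linux/init.h>
	#include <linux/pm.h>
	#include <linux/pm_runtime.h>

	extern struct bus_type my_bus_type;	/* defined by the bus driver */

	static struct dev_power_domain my_bus_pwr_domain = {
		.ops = {
			.runtime_suspend = pm_runtime_clk_suspend,
			.runtime_resume = pm_runtime_clk_resume,
		},
	};

	static struct pm_clk_notifier_block my_bus_clk_nb = {
		.pwr_domain = &my_bus_pwr_domain,
		.con_ids = { "ick", "fck", NULL },
	};

	static int __init my_bus_pm_init(void)
	{
		/* After this, BUS_NOTIFY_ADD_DEVICE makes pm_runtime_clk_notify()
		 * call pm_runtime_clk_init() and pm_runtime_clk_add() for each new
		 * device, and BUS_NOTIFY_DEL_DEVICE tears the clock list down.
		 */
		pm_runtime_clk_add_notifier(&my_bus_type, &my_bus_clk_nb);
		return 0;
	}
	core_initcall(my_bus_pm_init);
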
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index abe3ab709e87..3b354560f306 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -426,10 +426,8 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
 
 	if (dev->pwr_domain) {
 		pm_dev_dbg(dev, state, "EARLY power domain ");
-		pm_noirq_op(dev, &dev->pwr_domain->ops, state);
-	}
-
-	if (dev->type && dev->type->pm) {
+		error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
+	} else if (dev->type && dev->type->pm) {
 		pm_dev_dbg(dev, state, "EARLY type ");
 		error = pm_noirq_op(dev, dev->type->pm, state);
 	} else if (dev->class && dev->class->pm) {
@@ -517,7 +515,8 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
 
 	if (dev->pwr_domain) {
 		pm_dev_dbg(dev, state, "power domain ");
-		pm_op(dev, &dev->pwr_domain->ops, state);
+		error = pm_op(dev, &dev->pwr_domain->ops, state);
+		goto End;
 	}
 
 	if (dev->type && dev->type->pm) {
@@ -629,12 +628,11 @@ static void device_complete(struct device *dev, pm_message_t state)
 {
 	device_lock(dev);
 
-	if (dev->pwr_domain && dev->pwr_domain->ops.complete) {
+	if (dev->pwr_domain) {
 		pm_dev_dbg(dev, state, "completing power domain ");
-		dev->pwr_domain->ops.complete(dev);
-	}
-
-	if (dev->type && dev->type->pm) {
+		if (dev->pwr_domain->ops.complete)
+			dev->pwr_domain->ops.complete(dev);
+	} else if (dev->type && dev->type->pm) {
 		pm_dev_dbg(dev, state, "completing type ");
 		if (dev->type->pm->complete)
 			dev->type->pm->complete(dev);
@@ -732,7 +730,12 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
 {
 	int error;
 
-	if (dev->type && dev->type->pm) {
+	if (dev->pwr_domain) {
+		pm_dev_dbg(dev, state, "LATE power domain ");
+		error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
+		if (error)
+			return error;
+	} else if (dev->type && dev->type->pm) {
 		pm_dev_dbg(dev, state, "LATE type ");
 		error = pm_noirq_op(dev, dev->type->pm, state);
 		if (error)
@@ -749,11 +752,6 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
 		return error;
 	}
 
-	if (dev->pwr_domain) {
-		pm_dev_dbg(dev, state, "LATE power domain ");
-		pm_noirq_op(dev, &dev->pwr_domain->ops, state);
-	}
-
 	return 0;
 }
 
@@ -841,21 +839,27 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 		goto End;
 	}
 
+	if (dev->pwr_domain) {
+		pm_dev_dbg(dev, state, "power domain ");
+		error = pm_op(dev, &dev->pwr_domain->ops, state);
+		goto End;
+	}
+
 	if (dev->type && dev->type->pm) {
 		pm_dev_dbg(dev, state, "type ");
 		error = pm_op(dev, dev->type->pm, state);
-		goto Domain;
+		goto End;
 	}
 
 	if (dev->class) {
 		if (dev->class->pm) {
 			pm_dev_dbg(dev, state, "class ");
 			error = pm_op(dev, dev->class->pm, state);
-			goto Domain;
+			goto End;
 		} else if (dev->class->suspend) {
 			pm_dev_dbg(dev, state, "legacy class ");
 			error = legacy_suspend(dev, state, dev->class->suspend);
-			goto Domain;
+			goto End;
 		}
 	}
 
@@ -869,12 +873,6 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 		}
 	}
 
- Domain:
-	if (!error && dev->pwr_domain) {
-		pm_dev_dbg(dev, state, "power domain ");
-		pm_op(dev, &dev->pwr_domain->ops, state);
-	}
-
  End:
 	device_unlock(dev);
 	complete_all(&dev->power.completion);
@@ -965,7 +963,14 @@ static int device_prepare(struct device *dev, pm_message_t state)
 
 	device_lock(dev);
 
-	if (dev->type && dev->type->pm) {
+	if (dev->pwr_domain) {
+		pm_dev_dbg(dev, state, "preparing power domain ");
+		if (dev->pwr_domain->ops.prepare)
+			error = dev->pwr_domain->ops.prepare(dev);
+		suspend_report_result(dev->pwr_domain->ops.prepare, error);
+		if (error)
+			goto End;
+	} else if (dev->type && dev->type->pm) {
 		pm_dev_dbg(dev, state, "preparing type ");
 		if (dev->type->pm->prepare)
 			error = dev->type->pm->prepare(dev);
@@ -984,13 +989,6 @@ static int device_prepare(struct device *dev, pm_message_t state)
 		if (dev->bus->pm->prepare)
 			error = dev->bus->pm->prepare(dev);
 		suspend_report_result(dev->bus->pm->prepare, error);
-		if (error)
-			goto End;
-	}
-
-	if (dev->pwr_domain && dev->pwr_domain->ops.prepare) {
-		pm_dev_dbg(dev, state, "preparing power domain ");
-		dev->pwr_domain->ops.prepare(dev);
 	}
 
  End:
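
The main.c hunks above make a device's power domain callbacks take exclusive precedence over its type, class and bus callbacks during the system sleep transitions. The lookup order they establish could be summarised by a small helper like the following sketch; the helper itself is invented for illustration, and only the fields it touches appear in the patch.

	/* Illustration only: the selection order the suspend/resume paths now use.
	 * A power domain, if present, is consulted exclusively; otherwise the
	 * device type, then the class, then the bus provides the PM ops.
	 */
	#include <linux/device.h>
	#include <linux/pm.h>

	static const struct dev_pm_ops *pm_ops_for(struct device *dev)
	{
		if (dev->pwr_domain)
			return &dev->pwr_domain->ops;
		if (dev->type && dev->type->pm)
			return dev->type->pm;
		if (dev->class && dev->class->pm)
			return dev->class->pm;
		if (dev->bus && dev->bus->pm)
			return dev->bus->pm;
		return NULL;	/* fall back to legacy callbacks */
	}
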
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 3172c60d23a9..0d4587b15c55 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -168,7 +168,6 @@ static int rpm_check_suspend_allowed(struct device *dev)
 static int rpm_idle(struct device *dev, int rpmflags)
 {
 	int (*callback)(struct device *);
-	int (*domain_callback)(struct device *);
 	int retval;
 
 	retval = rpm_check_suspend_allowed(dev);
@@ -214,7 +213,9 @@ static int rpm_idle(struct device *dev, int rpmflags)
 
 	dev->power.idle_notification = true;
 
-	if (dev->type && dev->type->pm)
+	if (dev->pwr_domain)
+		callback = dev->pwr_domain->ops.runtime_idle;
+	else if (dev->type && dev->type->pm)
 		callback = dev->type->pm->runtime_idle;
 	else if (dev->class && dev->class->pm)
 		callback = dev->class->pm->runtime_idle;
@@ -223,19 +224,10 @@ static int rpm_idle(struct device *dev, int rpmflags)
 	else
 		callback = NULL;
 
-	if (dev->pwr_domain)
-		domain_callback = dev->pwr_domain->ops.runtime_idle;
-	else
-		domain_callback = NULL;
-
-	if (callback || domain_callback) {
+	if (callback) {
 		spin_unlock_irq(&dev->power.lock);
 
-		if (domain_callback)
-			retval = domain_callback(dev);
-
-		if (!retval && callback)
-			callback(dev);
+		callback(dev);
 
 		spin_lock_irq(&dev->power.lock);
 	}
@@ -382,7 +374,9 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 
 	__update_runtime_status(dev, RPM_SUSPENDING);
 
-	if (dev->type && dev->type->pm)
+	if (dev->pwr_domain)
+		callback = dev->pwr_domain->ops.runtime_suspend;
+	else if (dev->type && dev->type->pm)
 		callback = dev->type->pm->runtime_suspend;
 	else if (dev->class && dev->class->pm)
 		callback = dev->class->pm->runtime_suspend;
@@ -400,8 +394,6 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 		else
 			pm_runtime_cancel_pending(dev);
 	} else {
-		if (dev->pwr_domain)
-			rpm_callback(dev->pwr_domain->ops.runtime_suspend, dev);
  no_callback:
 		__update_runtime_status(dev, RPM_SUSPENDED);
 		pm_runtime_deactivate_timer(dev);
@@ -582,9 +574,8 @@ static int rpm_resume(struct device *dev, int rpmflags)
 	__update_runtime_status(dev, RPM_RESUMING);
 
 	if (dev->pwr_domain)
-		rpm_callback(dev->pwr_domain->ops.runtime_resume, dev);
-
-	if (dev->type && dev->type->pm)
+		callback = dev->pwr_domain->ops.runtime_resume;
+	else if (dev->type && dev->type->pm)
 		callback = dev->type->pm->runtime_resume;
 	else if (dev->class && dev->class->pm)
 		callback = dev->class->pm->runtime_resume;
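
Likewise, the runtime.c hunks stop running the power domain callback in addition to the subsystem one and instead use it as the only runtime PM callback. A domain that still wants the subsystem/driver behaviour therefore has to invoke it from its own routine; a hedged sketch follows, with the function name invented and pm_generic_runtime_suspend() assumed to be available from the generic PM core.

	/* Hypothetical domain callback: run the driver-level runtime suspend
	 * first, then gate the device clocks registered via pm_runtime_clk_add().
	 */
	#include <linux/pm_runtime.h>

	static int my_domain_runtime_suspend(struct device *dev)
	{
		int ret = pm_generic_runtime_suspend(dev);

		if (ret)
			return ret;
		return pm_runtime_clk_suspend(dev);
	}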