aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/base/power
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/base/power')
-rw-r--r--drivers/base/power/Makefile4
-rw-r--r--drivers/base/power/clock_ops.c431
-rw-r--r--drivers/base/power/generic_ops.c39
-rw-r--r--drivers/base/power/main.c82
-rw-r--r--drivers/base/power/runtime.c29
-rw-r--r--drivers/base/power/sysfs.c4
-rw-r--r--drivers/base/power/wakeup.c1
7 files changed, 527 insertions, 63 deletions
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 118c1b92a511..3647e114d0e7 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -3,6 +3,6 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
3obj-$(CONFIG_PM_RUNTIME) += runtime.o 3obj-$(CONFIG_PM_RUNTIME) += runtime.o
4obj-$(CONFIG_PM_TRACE_RTC) += trace.o 4obj-$(CONFIG_PM_TRACE_RTC) += trace.o
5obj-$(CONFIG_PM_OPP) += opp.o 5obj-$(CONFIG_PM_OPP) += opp.o
6obj-$(CONFIG_HAVE_CLK) += clock_ops.o
6 7
7ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG 8ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG \ No newline at end of file
8ccflags-$(CONFIG_PM_VERBOSE) += -DDEBUG
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
new file mode 100644
index 000000000000..c0dd09df7be8
--- /dev/null
+++ b/drivers/base/power/clock_ops.c
@@ -0,0 +1,431 @@
1/*
2 * drivers/base/power/clock_ops.c - Generic clock manipulation PM callbacks
3 *
4 * Copyright (c) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
5 *
6 * This file is released under the GPLv2.
7 */
8
9#include <linux/init.h>
10#include <linux/kernel.h>
11#include <linux/io.h>
12#include <linux/pm.h>
13#include <linux/pm_runtime.h>
14#include <linux/clk.h>
15#include <linux/slab.h>
16#include <linux/err.h>
17
18#ifdef CONFIG_PM_RUNTIME
19
/* Per-device runtime PM clock bookkeeping, stored in power.subsys_data. */
struct pm_runtime_clk_data {
	struct list_head clock_list;	/* list of struct pm_clock_entry */
	struct mutex lock;		/* protects clock_list */
};

/* Lifecycle state of one managed clock; ordered so < / >= checks work. */
enum pce_status {
	PCE_STATUS_NONE = 0,	/* clk_get() not attempted yet */
	PCE_STATUS_ACQUIRED,	/* clk_get() succeeded, clock disabled */
	PCE_STATUS_ENABLED,	/* clock acquired and currently enabled */
	PCE_STATUS_ERROR,	/* clk_get() failed; entry is unusable */
};

/* One clock managed for runtime PM on behalf of a device. */
struct pm_clock_entry {
	struct list_head node;	/* link in pm_runtime_clk_data.clock_list */
	char *con_id;		/* kstrdup'd connection ID, or NULL */
	struct clk *clk;	/* clock handle, valid once acquired */
	enum pce_status status;	/* current state of this entry */
};
38
39static struct pm_runtime_clk_data *__to_prd(struct device *dev)
40{
41 return dev ? dev->power.subsys_data : NULL;
42}
43
44/**
45 * pm_runtime_clk_add - Start using a device clock for runtime PM.
46 * @dev: Device whose clock is going to be used for runtime PM.
47 * @con_id: Connection ID of the clock.
48 *
49 * Add the clock represented by @con_id to the list of clocks used for
50 * the runtime PM of @dev.
51 */
52int pm_runtime_clk_add(struct device *dev, const char *con_id)
53{
54 struct pm_runtime_clk_data *prd = __to_prd(dev);
55 struct pm_clock_entry *ce;
56
57 if (!prd)
58 return -EINVAL;
59
60 ce = kzalloc(sizeof(*ce), GFP_KERNEL);
61 if (!ce) {
62 dev_err(dev, "Not enough memory for clock entry.\n");
63 return -ENOMEM;
64 }
65
66 if (con_id) {
67 ce->con_id = kstrdup(con_id, GFP_KERNEL);
68 if (!ce->con_id) {
69 dev_err(dev,
70 "Not enough memory for clock connection ID.\n");
71 kfree(ce);
72 return -ENOMEM;
73 }
74 }
75
76 mutex_lock(&prd->lock);
77 list_add_tail(&ce->node, &prd->clock_list);
78 mutex_unlock(&prd->lock);
79 return 0;
80}
81
82/**
83 * __pm_runtime_clk_remove - Destroy runtime PM clock entry.
84 * @ce: Runtime PM clock entry to destroy.
85 *
86 * This routine must be called under the mutex protecting the runtime PM list
87 * of clocks corresponding the the @ce's device.
88 */
89static void __pm_runtime_clk_remove(struct pm_clock_entry *ce)
90{
91 if (!ce)
92 return;
93
94 list_del(&ce->node);
95
96 if (ce->status < PCE_STATUS_ERROR) {
97 if (ce->status == PCE_STATUS_ENABLED)
98 clk_disable(ce->clk);
99
100 if (ce->status >= PCE_STATUS_ACQUIRED)
101 clk_put(ce->clk);
102 }
103
104 if (ce->con_id)
105 kfree(ce->con_id);
106
107 kfree(ce);
108}
109
110/**
111 * pm_runtime_clk_remove - Stop using a device clock for runtime PM.
112 * @dev: Device whose clock should not be used for runtime PM any more.
113 * @con_id: Connection ID of the clock.
114 *
115 * Remove the clock represented by @con_id from the list of clocks used for
116 * the runtime PM of @dev.
117 */
118void pm_runtime_clk_remove(struct device *dev, const char *con_id)
119{
120 struct pm_runtime_clk_data *prd = __to_prd(dev);
121 struct pm_clock_entry *ce;
122
123 if (!prd)
124 return;
125
126 mutex_lock(&prd->lock);
127
128 list_for_each_entry(ce, &prd->clock_list, node) {
129 if (!con_id && !ce->con_id) {
130 __pm_runtime_clk_remove(ce);
131 break;
132 } else if (!con_id || !ce->con_id) {
133 continue;
134 } else if (!strcmp(con_id, ce->con_id)) {
135 __pm_runtime_clk_remove(ce);
136 break;
137 }
138 }
139
140 mutex_unlock(&prd->lock);
141}
142
143/**
144 * pm_runtime_clk_init - Initialize a device's list of runtime PM clocks.
145 * @dev: Device to initialize the list of runtime PM clocks for.
146 *
147 * Allocate a struct pm_runtime_clk_data object, initialize its lock member and
148 * make the @dev's power.subsys_data field point to it.
149 */
150int pm_runtime_clk_init(struct device *dev)
151{
152 struct pm_runtime_clk_data *prd;
153
154 prd = kzalloc(sizeof(*prd), GFP_KERNEL);
155 if (!prd) {
156 dev_err(dev, "Not enough memory fo runtime PM data.\n");
157 return -ENOMEM;
158 }
159
160 INIT_LIST_HEAD(&prd->clock_list);
161 mutex_init(&prd->lock);
162 dev->power.subsys_data = prd;
163 return 0;
164}
165
/**
 * pm_runtime_clk_destroy - Destroy a device's list of runtime PM clocks.
 * @dev: Device to destroy the list of runtime PM clocks for.
 *
 * Clear the @dev's power.subsys_data field, remove the list of clock entries
 * from the struct pm_runtime_clk_data object pointed to by it before and free
 * that object.
 */
void pm_runtime_clk_destroy(struct device *dev)
{
	struct pm_runtime_clk_data *prd = __to_prd(dev);
	struct pm_clock_entry *ce, *c;

	if (!prd)
		return;

	/* Detach the data from the device before tearing it down. */
	dev->power.subsys_data = NULL;

	mutex_lock(&prd->lock);

	/*
	 * Tear entries down in reverse registration order; the _safe variant
	 * is needed because __pm_runtime_clk_remove() frees each entry.
	 */
	list_for_each_entry_safe_reverse(ce, c, &prd->clock_list, node)
		__pm_runtime_clk_remove(ce);

	mutex_unlock(&prd->lock);

	kfree(prd);
}
193
/**
 * pm_runtime_clk_acquire - Acquire a device clock.
 * @dev: Device whose clock is to be acquired.
 * @ce: Runtime PM clock entry holding the connection ID of the clock.
 */
static void pm_runtime_clk_acquire(struct device *dev,
				   struct pm_clock_entry *ce)
{
	ce->clk = clk_get(dev, ce->con_id);
	if (IS_ERR(ce->clk)) {
		/*
		 * Mark the entry failed; callers only retry the lookup while
		 * the status is still PCE_STATUS_NONE.
		 */
		ce->status = PCE_STATUS_ERROR;
	} else {
		ce->status = PCE_STATUS_ACQUIRED;
		dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id);
	}
}
210
211/**
212 * pm_runtime_clk_suspend - Disable clocks in a device's runtime PM clock list.
213 * @dev: Device to disable the clocks for.
214 */
215int pm_runtime_clk_suspend(struct device *dev)
216{
217 struct pm_runtime_clk_data *prd = __to_prd(dev);
218 struct pm_clock_entry *ce;
219
220 dev_dbg(dev, "%s()\n", __func__);
221
222 if (!prd)
223 return 0;
224
225 mutex_lock(&prd->lock);
226
227 list_for_each_entry_reverse(ce, &prd->clock_list, node) {
228 if (ce->status == PCE_STATUS_NONE)
229 pm_runtime_clk_acquire(dev, ce);
230
231 if (ce->status < PCE_STATUS_ERROR) {
232 clk_disable(ce->clk);
233 ce->status = PCE_STATUS_ACQUIRED;
234 }
235 }
236
237 mutex_unlock(&prd->lock);
238
239 return 0;
240}
241
242/**
243 * pm_runtime_clk_resume - Enable clocks in a device's runtime PM clock list.
244 * @dev: Device to enable the clocks for.
245 */
246int pm_runtime_clk_resume(struct device *dev)
247{
248 struct pm_runtime_clk_data *prd = __to_prd(dev);
249 struct pm_clock_entry *ce;
250
251 dev_dbg(dev, "%s()\n", __func__);
252
253 if (!prd)
254 return 0;
255
256 mutex_lock(&prd->lock);
257
258 list_for_each_entry(ce, &prd->clock_list, node) {
259 if (ce->status == PCE_STATUS_NONE)
260 pm_runtime_clk_acquire(dev, ce);
261
262 if (ce->status < PCE_STATUS_ERROR) {
263 clk_enable(ce->clk);
264 ce->status = PCE_STATUS_ENABLED;
265 }
266 }
267
268 mutex_unlock(&prd->lock);
269
270 return 0;
271}
272
273/**
274 * pm_runtime_clk_notify - Notify routine for device addition and removal.
275 * @nb: Notifier block object this function is a member of.
276 * @action: Operation being carried out by the caller.
277 * @data: Device the routine is being run for.
278 *
279 * For this function to work, @nb must be a member of an object of type
280 * struct pm_clk_notifier_block containing all of the requisite data.
281 * Specifically, the pwr_domain member of that object is copied to the device's
282 * pwr_domain field and its con_ids member is used to populate the device's list
283 * of runtime PM clocks, depending on @action.
284 *
285 * If the device's pwr_domain field is already populated with a value different
286 * from the one stored in the struct pm_clk_notifier_block object, the function
287 * does nothing.
288 */
289static int pm_runtime_clk_notify(struct notifier_block *nb,
290 unsigned long action, void *data)
291{
292 struct pm_clk_notifier_block *clknb;
293 struct device *dev = data;
294 char *con_id;
295 int error;
296
297 dev_dbg(dev, "%s() %ld\n", __func__, action);
298
299 clknb = container_of(nb, struct pm_clk_notifier_block, nb);
300
301 switch (action) {
302 case BUS_NOTIFY_ADD_DEVICE:
303 if (dev->pwr_domain)
304 break;
305
306 error = pm_runtime_clk_init(dev);
307 if (error)
308 break;
309
310 dev->pwr_domain = clknb->pwr_domain;
311 if (clknb->con_ids[0]) {
312 for (con_id = clknb->con_ids[0]; *con_id; con_id++)
313 pm_runtime_clk_add(dev, con_id);
314 } else {
315 pm_runtime_clk_add(dev, NULL);
316 }
317
318 break;
319 case BUS_NOTIFY_DEL_DEVICE:
320 if (dev->pwr_domain != clknb->pwr_domain)
321 break;
322
323 dev->pwr_domain = NULL;
324 pm_runtime_clk_destroy(dev);
325 break;
326 }
327
328 return 0;
329}
330
331#else /* !CONFIG_PM_RUNTIME */
332
/**
 * enable_clock - Enable a device clock.
 * @dev: Device whose clock is to be enabled.
 * @con_id: Connection ID of the clock.
 *
 * Look the clock up, force it on, and drop the reference.
 */
static void enable_clock(struct device *dev, const char *con_id)
{
	struct clk *clk = clk_get(dev, con_id);

	if (IS_ERR(clk))
		return;

	clk_enable(clk);
	clk_put(clk);
	dev_info(dev, "Runtime PM disabled, clock forced on.\n");
}
349
/**
 * disable_clock - Disable a device clock.
 * @dev: Device whose clock is to be disabled.
 * @con_id: Connection ID of the clock.
 *
 * Look the clock up, force it off, and drop the reference.
 */
static void disable_clock(struct device *dev, const char *con_id)
{
	struct clk *clk = clk_get(dev, con_id);

	if (IS_ERR(clk))
		return;

	clk_disable(clk);
	clk_put(clk);
	dev_info(dev, "Runtime PM disabled, clock forced off.\n");
}
366
367/**
368 * pm_runtime_clk_notify - Notify routine for device addition and removal.
369 * @nb: Notifier block object this function is a member of.
370 * @action: Operation being carried out by the caller.
371 * @data: Device the routine is being run for.
372 *
373 * For this function to work, @nb must be a member of an object of type
374 * struct pm_clk_notifier_block containing all of the requisite data.
375 * Specifically, the con_ids member of that object is used to enable or disable
376 * the device's clocks, depending on @action.
377 */
378static int pm_runtime_clk_notify(struct notifier_block *nb,
379 unsigned long action, void *data)
380{
381 struct pm_clk_notifier_block *clknb;
382 struct device *dev = data;
383 char *con_id;
384
385 dev_dbg(dev, "%s() %ld\n", __func__, action);
386
387 clknb = container_of(nb, struct pm_clk_notifier_block, nb);
388
389 switch (action) {
390 case BUS_NOTIFY_ADD_DEVICE:
391 if (clknb->con_ids[0]) {
392 for (con_id = clknb->con_ids[0]; *con_id; con_id++)
393 enable_clock(dev, con_id);
394 } else {
395 enable_clock(dev, NULL);
396 }
397 break;
398 case BUS_NOTIFY_DEL_DEVICE:
399 if (clknb->con_ids[0]) {
400 for (con_id = clknb->con_ids[0]; *con_id; con_id++)
401 disable_clock(dev, con_id);
402 } else {
403 disable_clock(dev, NULL);
404 }
405 break;
406 }
407
408 return 0;
409}
410
411#endif /* !CONFIG_PM_RUNTIME */
412
413/**
414 * pm_runtime_clk_add_notifier - Add bus type notifier for runtime PM clocks.
415 * @bus: Bus type to add the notifier to.
416 * @clknb: Notifier to be added to the given bus type.
417 *
418 * The nb member of @clknb is not expected to be initialized and its
419 * notifier_call member will be replaced with pm_runtime_clk_notify(). However,
420 * the remaining members of @clknb should be populated prior to calling this
421 * routine.
422 */
423void pm_runtime_clk_add_notifier(struct bus_type *bus,
424 struct pm_clk_notifier_block *clknb)
425{
426 if (!bus || !clknb)
427 return;
428
429 clknb->nb.notifier_call = pm_runtime_clk_notify;
430 bus_register_notifier(bus, &clknb->nb);
431}
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 42f97f925629..cb3bb368681c 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -74,6 +74,23 @@ EXPORT_SYMBOL_GPL(pm_generic_runtime_resume);
74 74
75#ifdef CONFIG_PM_SLEEP 75#ifdef CONFIG_PM_SLEEP
76/** 76/**
77 * pm_generic_prepare - Generic routine preparing a device for power transition.
78 * @dev: Device to prepare.
79 *
80 * Prepare a device for a system-wide power transition.
81 */
82int pm_generic_prepare(struct device *dev)
83{
84 struct device_driver *drv = dev->driver;
85 int ret = 0;
86
87 if (drv && drv->pm && drv->pm->prepare)
88 ret = drv->pm->prepare(dev);
89
90 return ret;
91}
92
93/**
77 * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback. 94 * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback.
78 * @dev: Device to handle. 95 * @dev: Device to handle.
79 * @event: PM transition of the system under way. 96 * @event: PM transition of the system under way.
@@ -213,16 +230,38 @@ int pm_generic_restore(struct device *dev)
213 return __pm_generic_resume(dev, PM_EVENT_RESTORE); 230 return __pm_generic_resume(dev, PM_EVENT_RESTORE);
214} 231}
215EXPORT_SYMBOL_GPL(pm_generic_restore); 232EXPORT_SYMBOL_GPL(pm_generic_restore);
233
/**
 * pm_generic_complete - Generic routine completing a device power transition.
 * @dev: Device to handle.
 *
 * Complete a device power transition during a system-wide power transition.
 */
void pm_generic_complete(struct device *dev)
{
	struct device_driver *drv = dev->driver;

	if (drv && drv->pm && drv->pm->complete)
		drv->pm->complete(dev);

	/*
	 * Let runtime PM try to suspend devices that haven't been in use before
	 * going into the system-wide sleep state we're resuming from.
	 */
	pm_runtime_idle(dev);
}
216#endif /* CONFIG_PM_SLEEP */ 253#endif /* CONFIG_PM_SLEEP */
217 254
218struct dev_pm_ops generic_subsys_pm_ops = { 255struct dev_pm_ops generic_subsys_pm_ops = {
219#ifdef CONFIG_PM_SLEEP 256#ifdef CONFIG_PM_SLEEP
257 .prepare = pm_generic_prepare,
220 .suspend = pm_generic_suspend, 258 .suspend = pm_generic_suspend,
221 .resume = pm_generic_resume, 259 .resume = pm_generic_resume,
222 .freeze = pm_generic_freeze, 260 .freeze = pm_generic_freeze,
223 .thaw = pm_generic_thaw, 261 .thaw = pm_generic_thaw,
224 .poweroff = pm_generic_poweroff, 262 .poweroff = pm_generic_poweroff,
225 .restore = pm_generic_restore, 263 .restore = pm_generic_restore,
264 .complete = pm_generic_complete,
226#endif 265#endif
227#ifdef CONFIG_PM_RUNTIME 266#ifdef CONFIG_PM_RUNTIME
228 .runtime_suspend = pm_generic_runtime_suspend, 267 .runtime_suspend = pm_generic_runtime_suspend,
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index abe3ab709e87..aa6320207745 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -426,10 +426,8 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
426 426
427 if (dev->pwr_domain) { 427 if (dev->pwr_domain) {
428 pm_dev_dbg(dev, state, "EARLY power domain "); 428 pm_dev_dbg(dev, state, "EARLY power domain ");
429 pm_noirq_op(dev, &dev->pwr_domain->ops, state); 429 error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
430 } 430 } else if (dev->type && dev->type->pm) {
431
432 if (dev->type && dev->type->pm) {
433 pm_dev_dbg(dev, state, "EARLY type "); 431 pm_dev_dbg(dev, state, "EARLY type ");
434 error = pm_noirq_op(dev, dev->type->pm, state); 432 error = pm_noirq_op(dev, dev->type->pm, state);
435 } else if (dev->class && dev->class->pm) { 433 } else if (dev->class && dev->class->pm) {
@@ -517,7 +515,8 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
517 515
518 if (dev->pwr_domain) { 516 if (dev->pwr_domain) {
519 pm_dev_dbg(dev, state, "power domain "); 517 pm_dev_dbg(dev, state, "power domain ");
520 pm_op(dev, &dev->pwr_domain->ops, state); 518 error = pm_op(dev, &dev->pwr_domain->ops, state);
519 goto End;
521 } 520 }
522 521
523 if (dev->type && dev->type->pm) { 522 if (dev->type && dev->type->pm) {
@@ -580,11 +579,13 @@ static bool is_async(struct device *dev)
580 * Execute the appropriate "resume" callback for all devices whose status 579 * Execute the appropriate "resume" callback for all devices whose status
581 * indicates that they are suspended. 580 * indicates that they are suspended.
582 */ 581 */
583static void dpm_resume(pm_message_t state) 582void dpm_resume(pm_message_t state)
584{ 583{
585 struct device *dev; 584 struct device *dev;
586 ktime_t starttime = ktime_get(); 585 ktime_t starttime = ktime_get();
587 586
587 might_sleep();
588
588 mutex_lock(&dpm_list_mtx); 589 mutex_lock(&dpm_list_mtx);
589 pm_transition = state; 590 pm_transition = state;
590 async_error = 0; 591 async_error = 0;
@@ -629,12 +630,11 @@ static void device_complete(struct device *dev, pm_message_t state)
629{ 630{
630 device_lock(dev); 631 device_lock(dev);
631 632
632 if (dev->pwr_domain && dev->pwr_domain->ops.complete) { 633 if (dev->pwr_domain) {
633 pm_dev_dbg(dev, state, "completing power domain "); 634 pm_dev_dbg(dev, state, "completing power domain ");
634 dev->pwr_domain->ops.complete(dev); 635 if (dev->pwr_domain->ops.complete)
635 } 636 dev->pwr_domain->ops.complete(dev);
636 637 } else if (dev->type && dev->type->pm) {
637 if (dev->type && dev->type->pm) {
638 pm_dev_dbg(dev, state, "completing type "); 638 pm_dev_dbg(dev, state, "completing type ");
639 if (dev->type->pm->complete) 639 if (dev->type->pm->complete)
640 dev->type->pm->complete(dev); 640 dev->type->pm->complete(dev);
@@ -658,10 +658,12 @@ static void device_complete(struct device *dev, pm_message_t state)
658 * Execute the ->complete() callbacks for all devices whose PM status is not 658 * Execute the ->complete() callbacks for all devices whose PM status is not
659 * DPM_ON (this allows new devices to be registered). 659 * DPM_ON (this allows new devices to be registered).
660 */ 660 */
661static void dpm_complete(pm_message_t state) 661void dpm_complete(pm_message_t state)
662{ 662{
663 struct list_head list; 663 struct list_head list;
664 664
665 might_sleep();
666
665 INIT_LIST_HEAD(&list); 667 INIT_LIST_HEAD(&list);
666 mutex_lock(&dpm_list_mtx); 668 mutex_lock(&dpm_list_mtx);
667 while (!list_empty(&dpm_prepared_list)) { 669 while (!list_empty(&dpm_prepared_list)) {
@@ -690,7 +692,6 @@ static void dpm_complete(pm_message_t state)
690 */ 692 */
691void dpm_resume_end(pm_message_t state) 693void dpm_resume_end(pm_message_t state)
692{ 694{
693 might_sleep();
694 dpm_resume(state); 695 dpm_resume(state);
695 dpm_complete(state); 696 dpm_complete(state);
696} 697}
@@ -732,7 +733,12 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
732{ 733{
733 int error; 734 int error;
734 735
735 if (dev->type && dev->type->pm) { 736 if (dev->pwr_domain) {
737 pm_dev_dbg(dev, state, "LATE power domain ");
738 error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
739 if (error)
740 return error;
741 } else if (dev->type && dev->type->pm) {
736 pm_dev_dbg(dev, state, "LATE type "); 742 pm_dev_dbg(dev, state, "LATE type ");
737 error = pm_noirq_op(dev, dev->type->pm, state); 743 error = pm_noirq_op(dev, dev->type->pm, state);
738 if (error) 744 if (error)
@@ -749,11 +755,6 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
749 return error; 755 return error;
750 } 756 }
751 757
752 if (dev->pwr_domain) {
753 pm_dev_dbg(dev, state, "LATE power domain ");
754 pm_noirq_op(dev, &dev->pwr_domain->ops, state);
755 }
756
757 return 0; 758 return 0;
758} 759}
759 760
@@ -841,21 +842,27 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
841 goto End; 842 goto End;
842 } 843 }
843 844
845 if (dev->pwr_domain) {
846 pm_dev_dbg(dev, state, "power domain ");
847 error = pm_op(dev, &dev->pwr_domain->ops, state);
848 goto End;
849 }
850
844 if (dev->type && dev->type->pm) { 851 if (dev->type && dev->type->pm) {
845 pm_dev_dbg(dev, state, "type "); 852 pm_dev_dbg(dev, state, "type ");
846 error = pm_op(dev, dev->type->pm, state); 853 error = pm_op(dev, dev->type->pm, state);
847 goto Domain; 854 goto End;
848 } 855 }
849 856
850 if (dev->class) { 857 if (dev->class) {
851 if (dev->class->pm) { 858 if (dev->class->pm) {
852 pm_dev_dbg(dev, state, "class "); 859 pm_dev_dbg(dev, state, "class ");
853 error = pm_op(dev, dev->class->pm, state); 860 error = pm_op(dev, dev->class->pm, state);
854 goto Domain; 861 goto End;
855 } else if (dev->class->suspend) { 862 } else if (dev->class->suspend) {
856 pm_dev_dbg(dev, state, "legacy class "); 863 pm_dev_dbg(dev, state, "legacy class ");
857 error = legacy_suspend(dev, state, dev->class->suspend); 864 error = legacy_suspend(dev, state, dev->class->suspend);
858 goto Domain; 865 goto End;
859 } 866 }
860 } 867 }
861 868
@@ -869,12 +876,6 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
869 } 876 }
870 } 877 }
871 878
872 Domain:
873 if (!error && dev->pwr_domain) {
874 pm_dev_dbg(dev, state, "power domain ");
875 pm_op(dev, &dev->pwr_domain->ops, state);
876 }
877
878 End: 879 End:
879 device_unlock(dev); 880 device_unlock(dev);
880 complete_all(&dev->power.completion); 881 complete_all(&dev->power.completion);
@@ -914,11 +915,13 @@ static int device_suspend(struct device *dev)
914 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices. 915 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
915 * @state: PM transition of the system being carried out. 916 * @state: PM transition of the system being carried out.
916 */ 917 */
917static int dpm_suspend(pm_message_t state) 918int dpm_suspend(pm_message_t state)
918{ 919{
919 ktime_t starttime = ktime_get(); 920 ktime_t starttime = ktime_get();
920 int error = 0; 921 int error = 0;
921 922
923 might_sleep();
924
922 mutex_lock(&dpm_list_mtx); 925 mutex_lock(&dpm_list_mtx);
923 pm_transition = state; 926 pm_transition = state;
924 async_error = 0; 927 async_error = 0;
@@ -965,7 +968,14 @@ static int device_prepare(struct device *dev, pm_message_t state)
965 968
966 device_lock(dev); 969 device_lock(dev);
967 970
968 if (dev->type && dev->type->pm) { 971 if (dev->pwr_domain) {
972 pm_dev_dbg(dev, state, "preparing power domain ");
973 if (dev->pwr_domain->ops.prepare)
974 error = dev->pwr_domain->ops.prepare(dev);
975 suspend_report_result(dev->pwr_domain->ops.prepare, error);
976 if (error)
977 goto End;
978 } else if (dev->type && dev->type->pm) {
969 pm_dev_dbg(dev, state, "preparing type "); 979 pm_dev_dbg(dev, state, "preparing type ");
970 if (dev->type->pm->prepare) 980 if (dev->type->pm->prepare)
971 error = dev->type->pm->prepare(dev); 981 error = dev->type->pm->prepare(dev);
@@ -984,13 +994,6 @@ static int device_prepare(struct device *dev, pm_message_t state)
984 if (dev->bus->pm->prepare) 994 if (dev->bus->pm->prepare)
985 error = dev->bus->pm->prepare(dev); 995 error = dev->bus->pm->prepare(dev);
986 suspend_report_result(dev->bus->pm->prepare, error); 996 suspend_report_result(dev->bus->pm->prepare, error);
987 if (error)
988 goto End;
989 }
990
991 if (dev->pwr_domain && dev->pwr_domain->ops.prepare) {
992 pm_dev_dbg(dev, state, "preparing power domain ");
993 dev->pwr_domain->ops.prepare(dev);
994 } 997 }
995 998
996 End: 999 End:
@@ -1005,10 +1008,12 @@ static int device_prepare(struct device *dev, pm_message_t state)
1005 * 1008 *
1006 * Execute the ->prepare() callback(s) for all devices. 1009 * Execute the ->prepare() callback(s) for all devices.
1007 */ 1010 */
1008static int dpm_prepare(pm_message_t state) 1011int dpm_prepare(pm_message_t state)
1009{ 1012{
1010 int error = 0; 1013 int error = 0;
1011 1014
1015 might_sleep();
1016
1012 mutex_lock(&dpm_list_mtx); 1017 mutex_lock(&dpm_list_mtx);
1013 while (!list_empty(&dpm_list)) { 1018 while (!list_empty(&dpm_list)) {
1014 struct device *dev = to_device(dpm_list.next); 1019 struct device *dev = to_device(dpm_list.next);
@@ -1057,7 +1062,6 @@ int dpm_suspend_start(pm_message_t state)
1057{ 1062{
1058 int error; 1063 int error;
1059 1064
1060 might_sleep();
1061 error = dpm_prepare(state); 1065 error = dpm_prepare(state);
1062 if (!error) 1066 if (!error)
1063 error = dpm_suspend(state); 1067 error = dpm_suspend(state);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 3172c60d23a9..0d4587b15c55 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -168,7 +168,6 @@ static int rpm_check_suspend_allowed(struct device *dev)
168static int rpm_idle(struct device *dev, int rpmflags) 168static int rpm_idle(struct device *dev, int rpmflags)
169{ 169{
170 int (*callback)(struct device *); 170 int (*callback)(struct device *);
171 int (*domain_callback)(struct device *);
172 int retval; 171 int retval;
173 172
174 retval = rpm_check_suspend_allowed(dev); 173 retval = rpm_check_suspend_allowed(dev);
@@ -214,7 +213,9 @@ static int rpm_idle(struct device *dev, int rpmflags)
214 213
215 dev->power.idle_notification = true; 214 dev->power.idle_notification = true;
216 215
217 if (dev->type && dev->type->pm) 216 if (dev->pwr_domain)
217 callback = dev->pwr_domain->ops.runtime_idle;
218 else if (dev->type && dev->type->pm)
218 callback = dev->type->pm->runtime_idle; 219 callback = dev->type->pm->runtime_idle;
219 else if (dev->class && dev->class->pm) 220 else if (dev->class && dev->class->pm)
220 callback = dev->class->pm->runtime_idle; 221 callback = dev->class->pm->runtime_idle;
@@ -223,19 +224,10 @@ static int rpm_idle(struct device *dev, int rpmflags)
223 else 224 else
224 callback = NULL; 225 callback = NULL;
225 226
226 if (dev->pwr_domain) 227 if (callback) {
227 domain_callback = dev->pwr_domain->ops.runtime_idle;
228 else
229 domain_callback = NULL;
230
231 if (callback || domain_callback) {
232 spin_unlock_irq(&dev->power.lock); 228 spin_unlock_irq(&dev->power.lock);
233 229
234 if (domain_callback) 230 callback(dev);
235 retval = domain_callback(dev);
236
237 if (!retval && callback)
238 callback(dev);
239 231
240 spin_lock_irq(&dev->power.lock); 232 spin_lock_irq(&dev->power.lock);
241 } 233 }
@@ -382,7 +374,9 @@ static int rpm_suspend(struct device *dev, int rpmflags)
382 374
383 __update_runtime_status(dev, RPM_SUSPENDING); 375 __update_runtime_status(dev, RPM_SUSPENDING);
384 376
385 if (dev->type && dev->type->pm) 377 if (dev->pwr_domain)
378 callback = dev->pwr_domain->ops.runtime_suspend;
379 else if (dev->type && dev->type->pm)
386 callback = dev->type->pm->runtime_suspend; 380 callback = dev->type->pm->runtime_suspend;
387 else if (dev->class && dev->class->pm) 381 else if (dev->class && dev->class->pm)
388 callback = dev->class->pm->runtime_suspend; 382 callback = dev->class->pm->runtime_suspend;
@@ -400,8 +394,6 @@ static int rpm_suspend(struct device *dev, int rpmflags)
400 else 394 else
401 pm_runtime_cancel_pending(dev); 395 pm_runtime_cancel_pending(dev);
402 } else { 396 } else {
403 if (dev->pwr_domain)
404 rpm_callback(dev->pwr_domain->ops.runtime_suspend, dev);
405 no_callback: 397 no_callback:
406 __update_runtime_status(dev, RPM_SUSPENDED); 398 __update_runtime_status(dev, RPM_SUSPENDED);
407 pm_runtime_deactivate_timer(dev); 399 pm_runtime_deactivate_timer(dev);
@@ -582,9 +574,8 @@ static int rpm_resume(struct device *dev, int rpmflags)
582 __update_runtime_status(dev, RPM_RESUMING); 574 __update_runtime_status(dev, RPM_RESUMING);
583 575
584 if (dev->pwr_domain) 576 if (dev->pwr_domain)
585 rpm_callback(dev->pwr_domain->ops.runtime_resume, dev); 577 callback = dev->pwr_domain->ops.runtime_resume;
586 578 else if (dev->type && dev->type->pm)
587 if (dev->type && dev->type->pm)
588 callback = dev->type->pm->runtime_resume; 579 callback = dev->type->pm->runtime_resume;
589 else if (dev->class && dev->class->pm) 580 else if (dev->class && dev->class->pm)
590 callback = dev->class->pm->runtime_resume; 581 callback = dev->class->pm->runtime_resume;
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index fff49bee781d..a9f5b8979611 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -212,8 +212,9 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev,
212static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show, 212static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show,
213 autosuspend_delay_ms_store); 213 autosuspend_delay_ms_store);
214 214
215#endif 215#endif /* CONFIG_PM_RUNTIME */
216 216
217#ifdef CONFIG_PM_SLEEP
217static ssize_t 218static ssize_t
218wake_show(struct device * dev, struct device_attribute *attr, char * buf) 219wake_show(struct device * dev, struct device_attribute *attr, char * buf)
219{ 220{
@@ -248,7 +249,6 @@ wake_store(struct device * dev, struct device_attribute *attr,
248 249
249static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store); 250static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store);
250 251
251#ifdef CONFIG_PM_SLEEP
252static ssize_t wakeup_count_show(struct device *dev, 252static ssize_t wakeup_count_show(struct device *dev,
253 struct device_attribute *attr, char *buf) 253 struct device_attribute *attr, char *buf)
254{ 254{
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index abbbd33e8d8a..84f7c7d5a098 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -110,7 +110,6 @@ void wakeup_source_add(struct wakeup_source *ws)
110 spin_lock_irq(&events_lock); 110 spin_lock_irq(&events_lock);
111 list_add_rcu(&ws->entry, &wakeup_sources); 111 list_add_rcu(&ws->entry, &wakeup_sources);
112 spin_unlock_irq(&events_lock); 112 spin_unlock_irq(&events_lock);
113 synchronize_rcu();
114} 113}
115EXPORT_SYMBOL_GPL(wakeup_source_add); 114EXPORT_SYMBOL_GPL(wakeup_source_add);
116 115