Diffstat (limited to 'drivers/base/power')
-rw-r--r--  drivers/base/power/Makefile      |   1
-rw-r--r--  drivers/base/power/generic_ops.c | 233
-rw-r--r--  drivers/base/power/main.c        | 333
-rw-r--r--  drivers/base/power/power.h       |   6
-rw-r--r--  drivers/base/power/runtime.c     | 123
-rw-r--r--  drivers/base/power/sysfs.c       | 100
6 files changed, 742 insertions, 54 deletions
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 3ce3519e8f30..89de75325cea 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -1,6 +1,7 @@
1 | obj-$(CONFIG_PM) += sysfs.o | 1 | obj-$(CONFIG_PM) += sysfs.o |
2 | obj-$(CONFIG_PM_SLEEP) += main.o | 2 | obj-$(CONFIG_PM_SLEEP) += main.o |
3 | obj-$(CONFIG_PM_RUNTIME) += runtime.o | 3 | obj-$(CONFIG_PM_RUNTIME) += runtime.o |
4 | obj-$(CONFIG_PM_OPS) += generic_ops.o | ||
4 | obj-$(CONFIG_PM_TRACE_RTC) += trace.o | 5 | obj-$(CONFIG_PM_TRACE_RTC) += trace.o |
5 | 6 | ||
6 | ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG | 7 | ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG |
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
new file mode 100644
index 000000000000..4b29d4981253
--- /dev/null
+++ b/drivers/base/power/generic_ops.c
@@ -0,0 +1,233 @@
1 | /* | ||
2 | * drivers/base/power/generic_ops.c - Generic PM callbacks for subsystems | ||
3 | * | ||
4 | * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. | ||
5 | * | ||
6 | * This file is released under the GPLv2. | ||
7 | */ | ||
8 | |||
9 | #include <linux/pm.h> | ||
10 | #include <linux/pm_runtime.h> | ||
11 | |||
12 | #ifdef CONFIG_PM_RUNTIME | ||
13 | /** | ||
14 | * pm_generic_runtime_idle - Generic runtime idle callback for subsystems. | ||
15 | * @dev: Device to handle. | ||
16 | * | ||
17 | * If PM operations are defined for the @dev's driver and they include | ||
18 | * ->runtime_idle(), execute it and return its error code, if nonzero. | ||
19 | * Otherwise, execute pm_runtime_suspend() for the device and return 0. | ||
20 | */ | ||
21 | int pm_generic_runtime_idle(struct device *dev) | ||
22 | { | ||
23 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
24 | |||
25 | if (pm && pm->runtime_idle) { | ||
26 | int ret = pm->runtime_idle(dev); | ||
27 | if (ret) | ||
28 | return ret; | ||
29 | } | ||
30 | |||
31 | pm_runtime_suspend(dev); | ||
32 | return 0; | ||
33 | } | ||
34 | EXPORT_SYMBOL_GPL(pm_generic_runtime_idle); | ||
35 | |||
36 | /** | ||
37 | * pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems. | ||
38 | * @dev: Device to suspend. | ||
39 | * | ||
40 | * If PM operations are defined for the @dev's driver and they include | ||
41 | * ->runtime_suspend(), execute it and return its error code. Otherwise, | ||
42 | * return -EINVAL. | ||
43 | */ | ||
44 | int pm_generic_runtime_suspend(struct device *dev) | ||
45 | { | ||
46 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
47 | int ret; | ||
48 | |||
49 | ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : -EINVAL; | ||
50 | |||
51 | return ret; | ||
52 | } | ||
53 | EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend); | ||
54 | |||
55 | /** | ||
56 | * pm_generic_runtime_resume - Generic runtime resume callback for subsystems. | ||
57 | * @dev: Device to resume. | ||
58 | * | ||
59 | * If PM operations are defined for the @dev's driver and they include | ||
60 | * ->runtime_resume(), execute it and return its error code. Otherwise, | ||
61 | * return -EINVAL. | ||
62 | */ | ||
63 | int pm_generic_runtime_resume(struct device *dev) | ||
64 | { | ||
65 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
66 | int ret; | ||
67 | |||
68 | ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : -EINVAL; | ||
69 | |||
70 | return ret; | ||
71 | } | ||
72 | EXPORT_SYMBOL_GPL(pm_generic_runtime_resume); | ||
73 | #endif /* CONFIG_PM_RUNTIME */ | ||
74 | |||
75 | #ifdef CONFIG_PM_SLEEP | ||
76 | /** | ||
77 | * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback. | ||
78 | * @dev: Device to handle. | ||
79 | * @event: PM transition of the system under way. | ||
80 | * | ||
81 | * If the device has not been suspended at run time, execute the | ||
82 | * suspend/freeze/poweroff/thaw callback provided by its driver, if defined, and | ||
83 | * return its error code. Otherwise, return zero. | ||
84 | */ | ||
85 | static int __pm_generic_call(struct device *dev, int event) | ||
86 | { | ||
87 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
88 | int (*callback)(struct device *); | ||
89 | |||
90 | if (!pm || pm_runtime_suspended(dev)) | ||
91 | return 0; | ||
92 | |||
93 | switch (event) { | ||
94 | case PM_EVENT_SUSPEND: | ||
95 | callback = pm->suspend; | ||
96 | break; | ||
97 | case PM_EVENT_FREEZE: | ||
98 | callback = pm->freeze; | ||
99 | break; | ||
100 | case PM_EVENT_HIBERNATE: | ||
101 | callback = pm->poweroff; | ||
102 | break; | ||
103 | case PM_EVENT_THAW: | ||
104 | callback = pm->thaw; | ||
105 | break; | ||
106 | default: | ||
107 | callback = NULL; | ||
108 | break; | ||
109 | } | ||
110 | |||
111 | return callback ? callback(dev) : 0; | ||
112 | } | ||
113 | |||
114 | /** | ||
115 | * pm_generic_suspend - Generic suspend callback for subsystems. | ||
116 | * @dev: Device to suspend. | ||
117 | */ | ||
118 | int pm_generic_suspend(struct device *dev) | ||
119 | { | ||
120 | return __pm_generic_call(dev, PM_EVENT_SUSPEND); | ||
121 | } | ||
122 | EXPORT_SYMBOL_GPL(pm_generic_suspend); | ||
123 | |||
124 | /** | ||
125 | * pm_generic_freeze - Generic freeze callback for subsystems. | ||
126 | * @dev: Device to freeze. | ||
127 | */ | ||
128 | int pm_generic_freeze(struct device *dev) | ||
129 | { | ||
130 | return __pm_generic_call(dev, PM_EVENT_FREEZE); | ||
131 | } | ||
132 | EXPORT_SYMBOL_GPL(pm_generic_freeze); | ||
133 | |||
134 | /** | ||
135 | * pm_generic_poweroff - Generic poweroff callback for subsystems. | ||
136 | * @dev: Device to handle. | ||
137 | */ | ||
138 | int pm_generic_poweroff(struct device *dev) | ||
139 | { | ||
140 | return __pm_generic_call(dev, PM_EVENT_HIBERNATE); | ||
141 | } | ||
142 | EXPORT_SYMBOL_GPL(pm_generic_poweroff); | ||
143 | |||
144 | /** | ||
145 | * pm_generic_thaw - Generic thaw callback for subsystems. | ||
146 | * @dev: Device to thaw. | ||
147 | */ | ||
148 | int pm_generic_thaw(struct device *dev) | ||
149 | { | ||
150 | return __pm_generic_call(dev, PM_EVENT_THAW); | ||
151 | } | ||
152 | EXPORT_SYMBOL_GPL(pm_generic_thaw); | ||
153 | |||
154 | /** | ||
155 | * __pm_generic_resume - Generic resume/restore callback for subsystems. | ||
156 | * @dev: Device to handle. | ||
157 | * @event: PM transition of the system under way. | ||
158 | * | ||
159 | * Execute the resume/restore callback provided by the @dev's driver, if | ||
160 | * defined. If it returns 0, change the device's runtime PM status to 'active'. | ||
161 | * Return the callback's error code. | ||
162 | */ | ||
163 | static int __pm_generic_resume(struct device *dev, int event) | ||
164 | { | ||
165 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
166 | int (*callback)(struct device *); | ||
167 | int ret; | ||
168 | |||
169 | if (!pm) | ||
170 | return 0; | ||
171 | |||
172 | switch (event) { | ||
173 | case PM_EVENT_RESUME: | ||
174 | callback = pm->resume; | ||
175 | break; | ||
176 | case PM_EVENT_RESTORE: | ||
177 | callback = pm->restore; | ||
178 | break; | ||
179 | default: | ||
180 | callback = NULL; | ||
181 | break; | ||
182 | } | ||
183 | |||
184 | if (!callback) | ||
185 | return 0; | ||
186 | |||
187 | ret = callback(dev); | ||
188 | if (!ret) { | ||
189 | pm_runtime_disable(dev); | ||
190 | pm_runtime_set_active(dev); | ||
191 | pm_runtime_enable(dev); | ||
192 | } | ||
193 | |||
194 | return ret; | ||
195 | } | ||
196 | |||
197 | /** | ||
198 | * pm_generic_resume - Generic resume callback for subsystems. | ||
199 | * @dev: Device to resume. | ||
200 | */ | ||
201 | int pm_generic_resume(struct device *dev) | ||
202 | { | ||
203 | return __pm_generic_resume(dev, PM_EVENT_RESUME); | ||
204 | } | ||
205 | EXPORT_SYMBOL_GPL(pm_generic_resume); | ||
206 | |||
207 | /** | ||
208 | * pm_generic_restore - Generic restore callback for subsystems. | ||
209 | * @dev: Device to restore. | ||
210 | */ | ||
211 | int pm_generic_restore(struct device *dev) | ||
212 | { | ||
213 | return __pm_generic_resume(dev, PM_EVENT_RESTORE); | ||
214 | } | ||
215 | EXPORT_SYMBOL_GPL(pm_generic_restore); | ||
216 | #endif /* CONFIG_PM_SLEEP */ | ||
217 | |||
218 | struct dev_pm_ops generic_subsys_pm_ops = { | ||
219 | #ifdef CONFIG_PM_SLEEP | ||
220 | .suspend = pm_generic_suspend, | ||
221 | .resume = pm_generic_resume, | ||
222 | .freeze = pm_generic_freeze, | ||
223 | .thaw = pm_generic_thaw, | ||
224 | .poweroff = pm_generic_poweroff, | ||
225 | .restore = pm_generic_restore, | ||
226 | #endif | ||
227 | #ifdef CONFIG_PM_RUNTIME | ||
228 | .runtime_suspend = pm_generic_runtime_suspend, | ||
229 | .runtime_resume = pm_generic_runtime_resume, | ||
230 | .runtime_idle = pm_generic_runtime_idle, | ||
231 | #endif | ||
232 | }; | ||
233 | EXPORT_SYMBOL_GPL(generic_subsys_pm_ops); | ||
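
The file above exports both the individual pm_generic_*() helpers and a ready-made generic_subsys_pm_ops table. As a minimal usage sketch (not part of this diff, which is limited to drivers/base/power; the matching declaration is expected to land in <linux/pm.h>, and the bus name below is made up):

#include <linux/device.h>
#include <linux/pm.h>

/* Declaration normally provided by <linux/pm.h> in the full series. */
extern struct dev_pm_ops generic_subsys_pm_ops;

/*
 * A subsystem with no special PM needs can reuse the generic callbacks
 * wholesale by pointing its bus type at the exported table.
 */
static struct bus_type examplebus_bus_type = {
	.name	= "examplebus",
	.pm	= &generic_subsys_pm_ops,
};

Subsystems that only want some slots to fall through to the driver can instead pick individual helpers, e.g. .suspend = pm_generic_suspend, into their own dev_pm_ops.
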
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 8aa2443182d5..941fcb87e52a 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -23,8 +23,9 @@
23 | #include <linux/pm.h> | 23 | #include <linux/pm.h> |
24 | #include <linux/pm_runtime.h> | 24 | #include <linux/pm_runtime.h> |
25 | #include <linux/resume-trace.h> | 25 | #include <linux/resume-trace.h> |
26 | #include <linux/rwsem.h> | ||
27 | #include <linux/interrupt.h> | 26 | #include <linux/interrupt.h> |
27 | #include <linux/sched.h> | ||
28 | #include <linux/async.h> | ||
28 | 29 | ||
29 | #include "../base.h" | 30 | #include "../base.h" |
30 | #include "power.h" | 31 | #include "power.h" |
@@ -34,14 +35,15 @@
34 | * because children are guaranteed to be discovered after parents, and | 35 | * because children are guaranteed to be discovered after parents, and |
35 | * are inserted at the back of the list on discovery. | 36 | * are inserted at the back of the list on discovery. |
36 | * | 37 | * |
37 | * Since device_pm_add() may be called with a device semaphore held, | 38 | * Since device_pm_add() may be called with a device lock held, |
38 | * we must never try to acquire a device semaphore while holding | 39 | * we must never try to acquire a device lock while holding |
39 | * dpm_list_mutex. | 40 | * dpm_list_mutex. |
40 | */ | 41 | */ |
41 | 42 | ||
42 | LIST_HEAD(dpm_list); | 43 | LIST_HEAD(dpm_list); |
43 | 44 | ||
44 | static DEFINE_MUTEX(dpm_list_mtx); | 45 | static DEFINE_MUTEX(dpm_list_mtx); |
46 | static pm_message_t pm_transition; | ||
45 | 47 | ||
46 | /* | 48 | /* |
47 | * Set once the preparation of devices for a PM transition has started, reset | 49 | * Set once the preparation of devices for a PM transition has started, reset |
@@ -56,6 +58,7 @@ static bool transition_started;
56 | void device_pm_init(struct device *dev) | 58 | void device_pm_init(struct device *dev) |
57 | { | 59 | { |
58 | dev->power.status = DPM_ON; | 60 | dev->power.status = DPM_ON; |
61 | init_completion(&dev->power.completion); | ||
59 | pm_runtime_init(dev); | 62 | pm_runtime_init(dev); |
60 | } | 63 | } |
61 | 64 | ||
@@ -111,6 +114,7 @@ void device_pm_remove(struct device *dev)
111 | pr_debug("PM: Removing info for %s:%s\n", | 114 | pr_debug("PM: Removing info for %s:%s\n", |
112 | dev->bus ? dev->bus->name : "No Bus", | 115 | dev->bus ? dev->bus->name : "No Bus", |
113 | kobject_name(&dev->kobj)); | 116 | kobject_name(&dev->kobj)); |
117 | complete_all(&dev->power.completion); | ||
114 | mutex_lock(&dpm_list_mtx); | 118 | mutex_lock(&dpm_list_mtx); |
115 | list_del_init(&dev->power.entry); | 119 | list_del_init(&dev->power.entry); |
116 | mutex_unlock(&dpm_list_mtx); | 120 | mutex_unlock(&dpm_list_mtx); |
@@ -161,6 +165,57 @@ void device_pm_move_last(struct device *dev)
161 | list_move_tail(&dev->power.entry, &dpm_list); | 165 | list_move_tail(&dev->power.entry, &dpm_list); |
162 | } | 166 | } |
163 | 167 | ||
168 | static ktime_t initcall_debug_start(struct device *dev) | ||
169 | { | ||
170 | ktime_t calltime = ktime_set(0, 0); | ||
171 | |||
172 | if (initcall_debug) { | ||
173 | pr_info("calling %s+ @ %i\n", | ||
174 | dev_name(dev), task_pid_nr(current)); | ||
175 | calltime = ktime_get(); | ||
176 | } | ||
177 | |||
178 | return calltime; | ||
179 | } | ||
180 | |||
181 | static void initcall_debug_report(struct device *dev, ktime_t calltime, | ||
182 | int error) | ||
183 | { | ||
184 | ktime_t delta, rettime; | ||
185 | |||
186 | if (initcall_debug) { | ||
187 | rettime = ktime_get(); | ||
188 | delta = ktime_sub(rettime, calltime); | ||
189 | pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev), | ||
190 | error, (unsigned long long)ktime_to_ns(delta) >> 10); | ||
191 | } | ||
192 | } | ||
193 | |||
194 | /** | ||
195 | * dpm_wait - Wait for a PM operation to complete. | ||
196 | * @dev: Device to wait for. | ||
197 | * @async: If unset, wait only if the device's power.async_suspend flag is set. | ||
198 | */ | ||
199 | static void dpm_wait(struct device *dev, bool async) | ||
200 | { | ||
201 | if (!dev) | ||
202 | return; | ||
203 | |||
204 | if (async || (pm_async_enabled && dev->power.async_suspend)) | ||
205 | wait_for_completion(&dev->power.completion); | ||
206 | } | ||
207 | |||
208 | static int dpm_wait_fn(struct device *dev, void *async_ptr) | ||
209 | { | ||
210 | dpm_wait(dev, *((bool *)async_ptr)); | ||
211 | return 0; | ||
212 | } | ||
213 | |||
214 | static void dpm_wait_for_children(struct device *dev, bool async) | ||
215 | { | ||
216 | device_for_each_child(dev, &async, dpm_wait_fn); | ||
217 | } | ||
218 | |||
164 | /** | 219 | /** |
165 | * pm_op - Execute the PM operation appropriate for given PM event. | 220 | * pm_op - Execute the PM operation appropriate for given PM event. |
166 | * @dev: Device to handle. | 221 | * @dev: Device to handle. |
@@ -172,6 +227,9 @@ static int pm_op(struct device *dev,
172 | pm_message_t state) | 227 | pm_message_t state) |
173 | { | 228 | { |
174 | int error = 0; | 229 | int error = 0; |
230 | ktime_t calltime; | ||
231 | |||
232 | calltime = initcall_debug_start(dev); | ||
175 | 233 | ||
176 | switch (state.event) { | 234 | switch (state.event) { |
177 | #ifdef CONFIG_SUSPEND | 235 | #ifdef CONFIG_SUSPEND |
@@ -219,6 +277,9 @@ static int pm_op(struct device *dev,
219 | default: | 277 | default: |
220 | error = -EINVAL; | 278 | error = -EINVAL; |
221 | } | 279 | } |
280 | |||
281 | initcall_debug_report(dev, calltime, error); | ||
282 | |||
222 | return error; | 283 | return error; |
223 | } | 284 | } |
224 | 285 | ||
@@ -236,6 +297,14 @@ static int pm_noirq_op(struct device *dev,
236 | pm_message_t state) | 297 | pm_message_t state) |
237 | { | 298 | { |
238 | int error = 0; | 299 | int error = 0; |
300 | ktime_t calltime, delta, rettime; | ||
301 | |||
302 | if (initcall_debug) { | ||
303 | pr_info("calling %s+ @ %i, parent: %s\n", | ||
304 | dev_name(dev), task_pid_nr(current), | ||
305 | dev->parent ? dev_name(dev->parent) : "none"); | ||
306 | calltime = ktime_get(); | ||
307 | } | ||
239 | 308 | ||
240 | switch (state.event) { | 309 | switch (state.event) { |
241 | #ifdef CONFIG_SUSPEND | 310 | #ifdef CONFIG_SUSPEND |
@@ -283,6 +352,15 @@ static int pm_noirq_op(struct device *dev,
283 | default: | 352 | default: |
284 | error = -EINVAL; | 353 | error = -EINVAL; |
285 | } | 354 | } |
355 | |||
356 | if (initcall_debug) { | ||
357 | rettime = ktime_get(); | ||
358 | delta = ktime_sub(rettime, calltime); | ||
359 | printk("initcall %s_i+ returned %d after %Ld usecs\n", | ||
360 | dev_name(dev), error, | ||
361 | (unsigned long long)ktime_to_ns(delta) >> 10); | ||
362 | } | ||
363 | |||
286 | return error; | 364 | return error; |
287 | } | 365 | } |
288 | 366 | ||
@@ -324,6 +402,23 @@ static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
324 | kobject_name(&dev->kobj), pm_verb(state.event), info, error); | 402 | kobject_name(&dev->kobj), pm_verb(state.event), info, error); |
325 | } | 403 | } |
326 | 404 | ||
405 | static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info) | ||
406 | { | ||
407 | ktime_t calltime; | ||
408 | s64 usecs64; | ||
409 | int usecs; | ||
410 | |||
411 | calltime = ktime_get(); | ||
412 | usecs64 = ktime_to_ns(ktime_sub(calltime, starttime)); | ||
413 | do_div(usecs64, NSEC_PER_USEC); | ||
414 | usecs = usecs64; | ||
415 | if (usecs == 0) | ||
416 | usecs = 1; | ||
417 | pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n", | ||
418 | info ?: "", info ? " " : "", pm_verb(state.event), | ||
419 | usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC); | ||
420 | } | ||
421 | |||
327 | /*------------------------- Resume routines -------------------------*/ | 422 | /*------------------------- Resume routines -------------------------*/ |
328 | 423 | ||
329 | /** | 424 | /** |
@@ -341,14 +436,26 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
341 | TRACE_DEVICE(dev); | 436 | TRACE_DEVICE(dev); |
342 | TRACE_RESUME(0); | 437 | TRACE_RESUME(0); |
343 | 438 | ||
344 | if (!dev->bus) | 439 | if (dev->bus && dev->bus->pm) { |
345 | goto End; | ||
346 | |||
347 | if (dev->bus->pm) { | ||
348 | pm_dev_dbg(dev, state, "EARLY "); | 440 | pm_dev_dbg(dev, state, "EARLY "); |
349 | error = pm_noirq_op(dev, dev->bus->pm, state); | 441 | error = pm_noirq_op(dev, dev->bus->pm, state); |
442 | if (error) | ||
443 | goto End; | ||
350 | } | 444 | } |
351 | End: | 445 | |
446 | if (dev->type && dev->type->pm) { | ||
447 | pm_dev_dbg(dev, state, "EARLY type "); | ||
448 | error = pm_noirq_op(dev, dev->type->pm, state); | ||
449 | if (error) | ||
450 | goto End; | ||
451 | } | ||
452 | |||
453 | if (dev->class && dev->class->pm) { | ||
454 | pm_dev_dbg(dev, state, "EARLY class "); | ||
455 | error = pm_noirq_op(dev, dev->class->pm, state); | ||
456 | } | ||
457 | |||
458 | End: | ||
352 | TRACE_RESUME(error); | 459 | TRACE_RESUME(error); |
353 | return error; | 460 | return error; |
354 | } | 461 | } |
@@ -363,6 +470,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
363 | void dpm_resume_noirq(pm_message_t state) | 470 | void dpm_resume_noirq(pm_message_t state) |
364 | { | 471 | { |
365 | struct device *dev; | 472 | struct device *dev; |
473 | ktime_t starttime = ktime_get(); | ||
366 | 474 | ||
367 | mutex_lock(&dpm_list_mtx); | 475 | mutex_lock(&dpm_list_mtx); |
368 | transition_started = false; | 476 | transition_started = false; |
@@ -376,23 +484,48 @@ void dpm_resume_noirq(pm_message_t state)
376 | pm_dev_err(dev, state, " early", error); | 484 | pm_dev_err(dev, state, " early", error); |
377 | } | 485 | } |
378 | mutex_unlock(&dpm_list_mtx); | 486 | mutex_unlock(&dpm_list_mtx); |
487 | dpm_show_time(starttime, state, "early"); | ||
379 | resume_device_irqs(); | 488 | resume_device_irqs(); |
380 | } | 489 | } |
381 | EXPORT_SYMBOL_GPL(dpm_resume_noirq); | 490 | EXPORT_SYMBOL_GPL(dpm_resume_noirq); |
382 | 491 | ||
383 | /** | 492 | /** |
493 | * legacy_resume - Execute a legacy (bus or class) resume callback for device. | ||
494 | * @dev: Device to resume. | ||
495 | * @cb: Resume callback to execute. | ||
496 | */ | ||
497 | static int legacy_resume(struct device *dev, int (*cb)(struct device *dev)) | ||
498 | { | ||
499 | int error; | ||
500 | ktime_t calltime; | ||
501 | |||
502 | calltime = initcall_debug_start(dev); | ||
503 | |||
504 | error = cb(dev); | ||
505 | suspend_report_result(cb, error); | ||
506 | |||
507 | initcall_debug_report(dev, calltime, error); | ||
508 | |||
509 | return error; | ||
510 | } | ||
511 | |||
512 | /** | ||
384 | * device_resume - Execute "resume" callbacks for given device. | 513 | * device_resume - Execute "resume" callbacks for given device. |
385 | * @dev: Device to handle. | 514 | * @dev: Device to handle. |
386 | * @state: PM transition of the system being carried out. | 515 | * @state: PM transition of the system being carried out. |
516 | * @async: If true, the device is being resumed asynchronously. | ||
387 | */ | 517 | */ |
388 | static int device_resume(struct device *dev, pm_message_t state) | 518 | static int device_resume(struct device *dev, pm_message_t state, bool async) |
389 | { | 519 | { |
390 | int error = 0; | 520 | int error = 0; |
391 | 521 | ||
392 | TRACE_DEVICE(dev); | 522 | TRACE_DEVICE(dev); |
393 | TRACE_RESUME(0); | 523 | TRACE_RESUME(0); |
394 | 524 | ||
395 | down(&dev->sem); | 525 | dpm_wait(dev->parent, async); |
526 | device_lock(dev); | ||
527 | |||
528 | dev->power.status = DPM_RESUMING; | ||
396 | 529 | ||
397 | if (dev->bus) { | 530 | if (dev->bus) { |
398 | if (dev->bus->pm) { | 531 | if (dev->bus->pm) { |
@@ -400,7 +533,7 @@ static int device_resume(struct device *dev, pm_message_t state)
400 | error = pm_op(dev, dev->bus->pm, state); | 533 | error = pm_op(dev, dev->bus->pm, state); |
401 | } else if (dev->bus->resume) { | 534 | } else if (dev->bus->resume) { |
402 | pm_dev_dbg(dev, state, "legacy "); | 535 | pm_dev_dbg(dev, state, "legacy "); |
403 | error = dev->bus->resume(dev); | 536 | error = legacy_resume(dev, dev->bus->resume); |
404 | } | 537 | } |
405 | if (error) | 538 | if (error) |
406 | goto End; | 539 | goto End; |
@@ -421,16 +554,34 @@ static int device_resume(struct device *dev, pm_message_t state)
421 | error = pm_op(dev, dev->class->pm, state); | 554 | error = pm_op(dev, dev->class->pm, state); |
422 | } else if (dev->class->resume) { | 555 | } else if (dev->class->resume) { |
423 | pm_dev_dbg(dev, state, "legacy class "); | 556 | pm_dev_dbg(dev, state, "legacy class "); |
424 | error = dev->class->resume(dev); | 557 | error = legacy_resume(dev, dev->class->resume); |
425 | } | 558 | } |
426 | } | 559 | } |
427 | End: | 560 | End: |
428 | up(&dev->sem); | 561 | device_unlock(dev); |
562 | complete_all(&dev->power.completion); | ||
429 | 563 | ||
430 | TRACE_RESUME(error); | 564 | TRACE_RESUME(error); |
431 | return error; | 565 | return error; |
432 | } | 566 | } |
433 | 567 | ||
568 | static void async_resume(void *data, async_cookie_t cookie) | ||
569 | { | ||
570 | struct device *dev = (struct device *)data; | ||
571 | int error; | ||
572 | |||
573 | error = device_resume(dev, pm_transition, true); | ||
574 | if (error) | ||
575 | pm_dev_err(dev, pm_transition, " async", error); | ||
576 | put_device(dev); | ||
577 | } | ||
578 | |||
579 | static bool is_async(struct device *dev) | ||
580 | { | ||
581 | return dev->power.async_suspend && pm_async_enabled | ||
582 | && !pm_trace_is_enabled(); | ||
583 | } | ||
584 | |||
434 | /** | 585 | /** |
435 | * dpm_resume - Execute "resume" callbacks for non-sysdev devices. | 586 | * dpm_resume - Execute "resume" callbacks for non-sysdev devices. |
436 | * @state: PM transition of the system being carried out. | 587 | * @state: PM transition of the system being carried out. |
@@ -441,20 +592,33 @@ static int device_resume(struct device *dev, pm_message_t state)
441 | static void dpm_resume(pm_message_t state) | 592 | static void dpm_resume(pm_message_t state) |
442 | { | 593 | { |
443 | struct list_head list; | 594 | struct list_head list; |
595 | struct device *dev; | ||
596 | ktime_t starttime = ktime_get(); | ||
444 | 597 | ||
445 | INIT_LIST_HEAD(&list); | 598 | INIT_LIST_HEAD(&list); |
446 | mutex_lock(&dpm_list_mtx); | 599 | mutex_lock(&dpm_list_mtx); |
447 | while (!list_empty(&dpm_list)) { | 600 | pm_transition = state; |
448 | struct device *dev = to_device(dpm_list.next); | 601 | |
602 | list_for_each_entry(dev, &dpm_list, power.entry) { | ||
603 | if (dev->power.status < DPM_OFF) | ||
604 | continue; | ||
605 | |||
606 | INIT_COMPLETION(dev->power.completion); | ||
607 | if (is_async(dev)) { | ||
608 | get_device(dev); | ||
609 | async_schedule(async_resume, dev); | ||
610 | } | ||
611 | } | ||
449 | 612 | ||
613 | while (!list_empty(&dpm_list)) { | ||
614 | dev = to_device(dpm_list.next); | ||
450 | get_device(dev); | 615 | get_device(dev); |
451 | if (dev->power.status >= DPM_OFF) { | 616 | if (dev->power.status >= DPM_OFF && !is_async(dev)) { |
452 | int error; | 617 | int error; |
453 | 618 | ||
454 | dev->power.status = DPM_RESUMING; | ||
455 | mutex_unlock(&dpm_list_mtx); | 619 | mutex_unlock(&dpm_list_mtx); |
456 | 620 | ||
457 | error = device_resume(dev, state); | 621 | error = device_resume(dev, state, false); |
458 | 622 | ||
459 | mutex_lock(&dpm_list_mtx); | 623 | mutex_lock(&dpm_list_mtx); |
460 | if (error) | 624 | if (error) |
@@ -469,6 +633,8 @@ static void dpm_resume(pm_message_t state)
469 | } | 633 | } |
470 | list_splice(&list, &dpm_list); | 634 | list_splice(&list, &dpm_list); |
471 | mutex_unlock(&dpm_list_mtx); | 635 | mutex_unlock(&dpm_list_mtx); |
636 | async_synchronize_full(); | ||
637 | dpm_show_time(starttime, state, NULL); | ||
472 | } | 638 | } |
473 | 639 | ||
474 | /** | 640 | /** |
@@ -478,7 +644,7 @@ static void dpm_resume(pm_message_t state)
478 | */ | 644 | */ |
479 | static void device_complete(struct device *dev, pm_message_t state) | 645 | static void device_complete(struct device *dev, pm_message_t state) |
480 | { | 646 | { |
481 | down(&dev->sem); | 647 | device_lock(dev); |
482 | 648 | ||
483 | if (dev->class && dev->class->pm && dev->class->pm->complete) { | 649 | if (dev->class && dev->class->pm && dev->class->pm->complete) { |
484 | pm_dev_dbg(dev, state, "completing class "); | 650 | pm_dev_dbg(dev, state, "completing class "); |
@@ -495,7 +661,7 @@ static void device_complete(struct device *dev, pm_message_t state)
495 | dev->bus->pm->complete(dev); | 661 | dev->bus->pm->complete(dev); |
496 | } | 662 | } |
497 | 663 | ||
498 | up(&dev->sem); | 664 | device_unlock(dev); |
499 | } | 665 | } |
500 | 666 | ||
501 | /** | 667 | /** |
@@ -521,7 +687,7 @@ static void dpm_complete(pm_message_t state)
521 | mutex_unlock(&dpm_list_mtx); | 687 | mutex_unlock(&dpm_list_mtx); |
522 | 688 | ||
523 | device_complete(dev, state); | 689 | device_complete(dev, state); |
524 | pm_runtime_put_noidle(dev); | 690 | pm_runtime_put_sync(dev); |
525 | 691 | ||
526 | mutex_lock(&dpm_list_mtx); | 692 | mutex_lock(&dpm_list_mtx); |
527 | } | 693 | } |
@@ -584,13 +750,26 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
584 | { | 750 | { |
585 | int error = 0; | 751 | int error = 0; |
586 | 752 | ||
587 | if (!dev->bus) | 753 | if (dev->class && dev->class->pm) { |
588 | return 0; | 754 | pm_dev_dbg(dev, state, "LATE class "); |
755 | error = pm_noirq_op(dev, dev->class->pm, state); | ||
756 | if (error) | ||
757 | goto End; | ||
758 | } | ||
589 | 759 | ||
590 | if (dev->bus->pm) { | 760 | if (dev->type && dev->type->pm) { |
761 | pm_dev_dbg(dev, state, "LATE type "); | ||
762 | error = pm_noirq_op(dev, dev->type->pm, state); | ||
763 | if (error) | ||
764 | goto End; | ||
765 | } | ||
766 | |||
767 | if (dev->bus && dev->bus->pm) { | ||
591 | pm_dev_dbg(dev, state, "LATE "); | 768 | pm_dev_dbg(dev, state, "LATE "); |
592 | error = pm_noirq_op(dev, dev->bus->pm, state); | 769 | error = pm_noirq_op(dev, dev->bus->pm, state); |
593 | } | 770 | } |
771 | |||
772 | End: | ||
594 | return error; | 773 | return error; |
595 | } | 774 | } |
596 | 775 | ||
@@ -604,6 +783,7 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
604 | int dpm_suspend_noirq(pm_message_t state) | 783 | int dpm_suspend_noirq(pm_message_t state) |
605 | { | 784 | { |
606 | struct device *dev; | 785 | struct device *dev; |
786 | ktime_t starttime = ktime_get(); | ||
607 | int error = 0; | 787 | int error = 0; |
608 | 788 | ||
609 | suspend_device_irqs(); | 789 | suspend_device_irqs(); |
@@ -619,20 +799,51 @@ int dpm_suspend_noirq(pm_message_t state)
619 | mutex_unlock(&dpm_list_mtx); | 799 | mutex_unlock(&dpm_list_mtx); |
620 | if (error) | 800 | if (error) |
621 | dpm_resume_noirq(resume_event(state)); | 801 | dpm_resume_noirq(resume_event(state)); |
802 | else | ||
803 | dpm_show_time(starttime, state, "late"); | ||
622 | return error; | 804 | return error; |
623 | } | 805 | } |
624 | EXPORT_SYMBOL_GPL(dpm_suspend_noirq); | 806 | EXPORT_SYMBOL_GPL(dpm_suspend_noirq); |
625 | 807 | ||
626 | /** | 808 | /** |
809 | * legacy_suspend - Execute a legacy (bus or class) suspend callback for device. | ||
810 | * @dev: Device to suspend. | ||
811 | * @state: PM transition of the system being carried out. | ||
812 | * @cb: Suspend callback to execute. | ||
813 | */ | ||
814 | static int legacy_suspend(struct device *dev, pm_message_t state, | ||
815 | int (*cb)(struct device *dev, pm_message_t state)) | ||
816 | { | ||
817 | int error; | ||
818 | ktime_t calltime; | ||
819 | |||
820 | calltime = initcall_debug_start(dev); | ||
821 | |||
822 | error = cb(dev, state); | ||
823 | suspend_report_result(cb, error); | ||
824 | |||
825 | initcall_debug_report(dev, calltime, error); | ||
826 | |||
827 | return error; | ||
828 | } | ||
829 | |||
830 | static int async_error; | ||
831 | |||
832 | /** | ||
627 | * device_suspend - Execute "suspend" callbacks for given device. | 833 | * device_suspend - Execute "suspend" callbacks for given device. |
628 | * @dev: Device to handle. | 834 | * @dev: Device to handle. |
629 | * @state: PM transition of the system being carried out. | 835 | * @state: PM transition of the system being carried out. |
836 | * @async: If true, the device is being suspended asynchronously. | ||
630 | */ | 837 | */ |
631 | static int device_suspend(struct device *dev, pm_message_t state) | 838 | static int __device_suspend(struct device *dev, pm_message_t state, bool async) |
632 | { | 839 | { |
633 | int error = 0; | 840 | int error = 0; |
634 | 841 | ||
635 | down(&dev->sem); | 842 | dpm_wait_for_children(dev, async); |
843 | device_lock(dev); | ||
844 | |||
845 | if (async_error) | ||
846 | goto End; | ||
636 | 847 | ||
637 | if (dev->class) { | 848 | if (dev->class) { |
638 | if (dev->class->pm) { | 849 | if (dev->class->pm) { |
@@ -640,8 +851,7 @@ static int device_suspend(struct device *dev, pm_message_t state) | |||
640 | error = pm_op(dev, dev->class->pm, state); | 851 | error = pm_op(dev, dev->class->pm, state); |
641 | } else if (dev->class->suspend) { | 852 | } else if (dev->class->suspend) { |
642 | pm_dev_dbg(dev, state, "legacy class "); | 853 | pm_dev_dbg(dev, state, "legacy class "); |
643 | error = dev->class->suspend(dev, state); | 854 | error = legacy_suspend(dev, state, dev->class->suspend); |
644 | suspend_report_result(dev->class->suspend, error); | ||
645 | } | 855 | } |
646 | if (error) | 856 | if (error) |
647 | goto End; | 857 | goto End; |
@@ -662,16 +872,47 @@ static int device_suspend(struct device *dev, pm_message_t state) | |||
662 | error = pm_op(dev, dev->bus->pm, state); | 872 | error = pm_op(dev, dev->bus->pm, state); |
663 | } else if (dev->bus->suspend) { | 873 | } else if (dev->bus->suspend) { |
664 | pm_dev_dbg(dev, state, "legacy "); | 874 | pm_dev_dbg(dev, state, "legacy "); |
665 | error = dev->bus->suspend(dev, state); | 875 | error = legacy_suspend(dev, state, dev->bus->suspend); |
666 | suspend_report_result(dev->bus->suspend, error); | ||
667 | } | 876 | } |
668 | } | 877 | } |
878 | |||
879 | if (!error) | ||
880 | dev->power.status = DPM_OFF; | ||
881 | |||
669 | End: | 882 | End: |
670 | up(&dev->sem); | 883 | device_unlock(dev); |
884 | complete_all(&dev->power.completion); | ||
671 | 885 | ||
672 | return error; | 886 | return error; |
673 | } | 887 | } |
674 | 888 | ||
889 | static void async_suspend(void *data, async_cookie_t cookie) | ||
890 | { | ||
891 | struct device *dev = (struct device *)data; | ||
892 | int error; | ||
893 | |||
894 | error = __device_suspend(dev, pm_transition, true); | ||
895 | if (error) { | ||
896 | pm_dev_err(dev, pm_transition, " async", error); | ||
897 | async_error = error; | ||
898 | } | ||
899 | |||
900 | put_device(dev); | ||
901 | } | ||
902 | |||
903 | static int device_suspend(struct device *dev) | ||
904 | { | ||
905 | INIT_COMPLETION(dev->power.completion); | ||
906 | |||
907 | if (pm_async_enabled && dev->power.async_suspend) { | ||
908 | get_device(dev); | ||
909 | async_schedule(async_suspend, dev); | ||
910 | return 0; | ||
911 | } | ||
912 | |||
913 | return __device_suspend(dev, pm_transition, false); | ||
914 | } | ||
915 | |||
675 | /** | 916 | /** |
676 | * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices. | 917 | * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices. |
677 | * @state: PM transition of the system being carried out. | 918 | * @state: PM transition of the system being carried out. |
@@ -679,17 +920,20 @@ static int device_suspend(struct device *dev, pm_message_t state)
679 | static int dpm_suspend(pm_message_t state) | 920 | static int dpm_suspend(pm_message_t state) |
680 | { | 921 | { |
681 | struct list_head list; | 922 | struct list_head list; |
923 | ktime_t starttime = ktime_get(); | ||
682 | int error = 0; | 924 | int error = 0; |
683 | 925 | ||
684 | INIT_LIST_HEAD(&list); | 926 | INIT_LIST_HEAD(&list); |
685 | mutex_lock(&dpm_list_mtx); | 927 | mutex_lock(&dpm_list_mtx); |
928 | pm_transition = state; | ||
929 | async_error = 0; | ||
686 | while (!list_empty(&dpm_list)) { | 930 | while (!list_empty(&dpm_list)) { |
687 | struct device *dev = to_device(dpm_list.prev); | 931 | struct device *dev = to_device(dpm_list.prev); |
688 | 932 | ||
689 | get_device(dev); | 933 | get_device(dev); |
690 | mutex_unlock(&dpm_list_mtx); | 934 | mutex_unlock(&dpm_list_mtx); |
691 | 935 | ||
692 | error = device_suspend(dev, state); | 936 | error = device_suspend(dev); |
693 | 937 | ||
694 | mutex_lock(&dpm_list_mtx); | 938 | mutex_lock(&dpm_list_mtx); |
695 | if (error) { | 939 | if (error) { |
@@ -697,13 +941,19 @@ static int dpm_suspend(pm_message_t state)
697 | put_device(dev); | 941 | put_device(dev); |
698 | break; | 942 | break; |
699 | } | 943 | } |
700 | dev->power.status = DPM_OFF; | ||
701 | if (!list_empty(&dev->power.entry)) | 944 | if (!list_empty(&dev->power.entry)) |
702 | list_move(&dev->power.entry, &list); | 945 | list_move(&dev->power.entry, &list); |
703 | put_device(dev); | 946 | put_device(dev); |
947 | if (async_error) | ||
948 | break; | ||
704 | } | 949 | } |
705 | list_splice(&list, dpm_list.prev); | 950 | list_splice(&list, dpm_list.prev); |
706 | mutex_unlock(&dpm_list_mtx); | 951 | mutex_unlock(&dpm_list_mtx); |
952 | async_synchronize_full(); | ||
953 | if (!error) | ||
954 | error = async_error; | ||
955 | if (!error) | ||
956 | dpm_show_time(starttime, state, NULL); | ||
707 | return error; | 957 | return error; |
708 | } | 958 | } |
709 | 959 | ||
@@ -719,7 +969,7 @@ static int device_prepare(struct device *dev, pm_message_t state)
719 | { | 969 | { |
720 | int error = 0; | 970 | int error = 0; |
721 | 971 | ||
722 | down(&dev->sem); | 972 | device_lock(dev); |
723 | 973 | ||
724 | if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) { | 974 | if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) { |
725 | pm_dev_dbg(dev, state, "preparing "); | 975 | pm_dev_dbg(dev, state, "preparing "); |
@@ -743,7 +993,7 @@ static int device_prepare(struct device *dev, pm_message_t state)
743 | suspend_report_result(dev->class->pm->prepare, error); | 993 | suspend_report_result(dev->class->pm->prepare, error); |
744 | } | 994 | } |
745 | End: | 995 | End: |
746 | up(&dev->sem); | 996 | device_unlock(dev); |
747 | 997 | ||
748 | return error; | 998 | return error; |
749 | } | 999 | } |
@@ -772,7 +1022,7 @@ static int dpm_prepare(pm_message_t state)
772 | pm_runtime_get_noresume(dev); | 1022 | pm_runtime_get_noresume(dev); |
773 | if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) { | 1023 | if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) { |
774 | /* Wake-up requested during system sleep transition. */ | 1024 | /* Wake-up requested during system sleep transition. */ |
775 | pm_runtime_put_noidle(dev); | 1025 | pm_runtime_put_sync(dev); |
776 | error = -EBUSY; | 1026 | error = -EBUSY; |
777 | } else { | 1027 | } else { |
778 | error = device_prepare(dev, state); | 1028 | error = device_prepare(dev, state); |
@@ -827,3 +1077,14 @@ void __suspend_report_result(const char *function, void *fn, int ret)
827 | printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret); | 1077 | printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret); |
828 | } | 1078 | } |
829 | EXPORT_SYMBOL_GPL(__suspend_report_result); | 1079 | EXPORT_SYMBOL_GPL(__suspend_report_result); |
1080 | |||
1081 | /** | ||
1082 | * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete. | ||
1083 | * @dev: Device to wait for. | ||
1084 | * @subordinate: Device that needs to wait for @dev. | ||
1085 | */ | ||
1086 | void device_pm_wait_for_dev(struct device *subordinate, struct device *dev) | ||
1087 | { | ||
1088 | dpm_wait(dev, subordinate->power.async_suspend); | ||
1089 | } | ||
1090 | EXPORT_SYMBOL_GPL(device_pm_wait_for_dev); | ||
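
The main.c changes above introduce asynchronous suspend and resume: devices whose power.async_suspend flag is set are handled in async_schedule() threads, ordering is enforced through the new per-device power.completion, and device_pm_wait_for_dev() lets a driver wait for a dependency that the parent/child hierarchy does not express. A hedged sketch of how a driver might use both; all "example" names and the drvdata-based dependency are hypothetical:

#include <linux/device.h>
#include <linux/pm.h>

static int example_probe(struct device *dev)
{
	/*
	 * Opt in to async suspend/resume.  Per the sysfs documentation in
	 * this series, this is safe only if all PM dependencies of the
	 * device are known to the PM core or expressed explicitly, as in
	 * the resume callback below.
	 */
	device_enable_async_suspend(dev);
	return 0;
}

static int example_resume(struct device *dev)
{
	struct device *partner = dev_get_drvdata(dev); /* hypothetical dependency */

	/* Do not touch the hardware before the partner device has resumed. */
	device_pm_wait_for_dev(dev, partner);
	return 0;
}
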
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index b8fa1aa5225a..c0bd03c83b9c 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -12,10 +12,10 @@ static inline void pm_runtime_remove(struct device *dev) {}
12 | 12 | ||
13 | #ifdef CONFIG_PM_SLEEP | 13 | #ifdef CONFIG_PM_SLEEP |
14 | 14 | ||
15 | /* | 15 | /* kernel/power/main.c */ |
16 | * main.c | 16 | extern int pm_async_enabled; |
17 | */ | ||
18 | 17 | ||
18 | /* drivers/base/power/main.c */ | ||
19 | extern struct list_head dpm_list; /* The active device list */ | 19 | extern struct list_head dpm_list; /* The active device list */ |
20 | 20 | ||
21 | static inline struct device *to_device(struct list_head *entry) | 21 | static inline struct device *to_device(struct list_head *entry) |
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 846d89e3d122..626dd147b75f 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -85,6 +85,19 @@ static int __pm_runtime_idle(struct device *dev)
85 | dev->bus->pm->runtime_idle(dev); | 85 | dev->bus->pm->runtime_idle(dev); |
86 | 86 | ||
87 | spin_lock_irq(&dev->power.lock); | 87 | spin_lock_irq(&dev->power.lock); |
88 | } else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle) { | ||
89 | spin_unlock_irq(&dev->power.lock); | ||
90 | |||
91 | dev->type->pm->runtime_idle(dev); | ||
92 | |||
93 | spin_lock_irq(&dev->power.lock); | ||
94 | } else if (dev->class && dev->class->pm | ||
95 | && dev->class->pm->runtime_idle) { | ||
96 | spin_unlock_irq(&dev->power.lock); | ||
97 | |||
98 | dev->class->pm->runtime_idle(dev); | ||
99 | |||
100 | spin_lock_irq(&dev->power.lock); | ||
88 | } | 101 | } |
89 | 102 | ||
90 | dev->power.idle_notification = false; | 103 | dev->power.idle_notification = false; |
@@ -185,6 +198,7 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
185 | } | 198 | } |
186 | 199 | ||
187 | dev->power.runtime_status = RPM_SUSPENDING; | 200 | dev->power.runtime_status = RPM_SUSPENDING; |
201 | dev->power.deferred_resume = false; | ||
188 | 202 | ||
189 | if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) { | 203 | if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) { |
190 | spin_unlock_irq(&dev->power.lock); | 204 | spin_unlock_irq(&dev->power.lock); |
@@ -193,6 +207,22 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
193 | 207 | ||
194 | spin_lock_irq(&dev->power.lock); | 208 | spin_lock_irq(&dev->power.lock); |
195 | dev->power.runtime_error = retval; | 209 | dev->power.runtime_error = retval; |
210 | } else if (dev->type && dev->type->pm | ||
211 | && dev->type->pm->runtime_suspend) { | ||
212 | spin_unlock_irq(&dev->power.lock); | ||
213 | |||
214 | retval = dev->type->pm->runtime_suspend(dev); | ||
215 | |||
216 | spin_lock_irq(&dev->power.lock); | ||
217 | dev->power.runtime_error = retval; | ||
218 | } else if (dev->class && dev->class->pm | ||
219 | && dev->class->pm->runtime_suspend) { | ||
220 | spin_unlock_irq(&dev->power.lock); | ||
221 | |||
222 | retval = dev->class->pm->runtime_suspend(dev); | ||
223 | |||
224 | spin_lock_irq(&dev->power.lock); | ||
225 | dev->power.runtime_error = retval; | ||
196 | } else { | 226 | } else { |
197 | retval = -ENOSYS; | 227 | retval = -ENOSYS; |
198 | } | 228 | } |
@@ -200,7 +230,6 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
200 | if (retval) { | 230 | if (retval) { |
201 | dev->power.runtime_status = RPM_ACTIVE; | 231 | dev->power.runtime_status = RPM_ACTIVE; |
202 | pm_runtime_cancel_pending(dev); | 232 | pm_runtime_cancel_pending(dev); |
203 | dev->power.deferred_resume = false; | ||
204 | 233 | ||
205 | if (retval == -EAGAIN || retval == -EBUSY) { | 234 | if (retval == -EAGAIN || retval == -EBUSY) { |
206 | notify = true; | 235 | notify = true; |
@@ -217,7 +246,6 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
217 | wake_up_all(&dev->power.wait_queue); | 246 | wake_up_all(&dev->power.wait_queue); |
218 | 247 | ||
219 | if (dev->power.deferred_resume) { | 248 | if (dev->power.deferred_resume) { |
220 | dev->power.deferred_resume = false; | ||
221 | __pm_runtime_resume(dev, false); | 249 | __pm_runtime_resume(dev, false); |
222 | retval = -EAGAIN; | 250 | retval = -EAGAIN; |
223 | goto out; | 251 | goto out; |
@@ -360,6 +388,22 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
360 | 388 | ||
361 | spin_lock_irq(&dev->power.lock); | 389 | spin_lock_irq(&dev->power.lock); |
362 | dev->power.runtime_error = retval; | 390 | dev->power.runtime_error = retval; |
391 | } else if (dev->type && dev->type->pm | ||
392 | && dev->type->pm->runtime_resume) { | ||
393 | spin_unlock_irq(&dev->power.lock); | ||
394 | |||
395 | retval = dev->type->pm->runtime_resume(dev); | ||
396 | |||
397 | spin_lock_irq(&dev->power.lock); | ||
398 | dev->power.runtime_error = retval; | ||
399 | } else if (dev->class && dev->class->pm | ||
400 | && dev->class->pm->runtime_resume) { | ||
401 | spin_unlock_irq(&dev->power.lock); | ||
402 | |||
403 | retval = dev->class->pm->runtime_resume(dev); | ||
404 | |||
405 | spin_lock_irq(&dev->power.lock); | ||
406 | dev->power.runtime_error = retval; | ||
363 | } else { | 407 | } else { |
364 | retval = -ENOSYS; | 408 | retval = -ENOSYS; |
365 | } | 409 | } |
@@ -626,6 +670,8 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
626 | goto out; | 670 | goto out; |
627 | 671 | ||
628 | dev->power.timer_expires = jiffies + msecs_to_jiffies(delay); | 672 | dev->power.timer_expires = jiffies + msecs_to_jiffies(delay); |
673 | if (!dev->power.timer_expires) | ||
674 | dev->power.timer_expires = 1; | ||
629 | mod_timer(&dev->power.suspend_timer, dev->power.timer_expires); | 675 | mod_timer(&dev->power.suspend_timer, dev->power.timer_expires); |
630 | 676 | ||
631 | out: | 677 | out: |
@@ -659,13 +705,17 @@ static int __pm_request_resume(struct device *dev)
659 | 705 | ||
660 | pm_runtime_deactivate_timer(dev); | 706 | pm_runtime_deactivate_timer(dev); |
661 | 707 | ||
708 | if (dev->power.runtime_status == RPM_SUSPENDING) { | ||
709 | dev->power.deferred_resume = true; | ||
710 | return retval; | ||
711 | } | ||
662 | if (dev->power.request_pending) { | 712 | if (dev->power.request_pending) { |
663 | /* If non-resume request is pending, we can overtake it. */ | 713 | /* If non-resume request is pending, we can overtake it. */ |
664 | dev->power.request = retval ? RPM_REQ_NONE : RPM_REQ_RESUME; | 714 | dev->power.request = retval ? RPM_REQ_NONE : RPM_REQ_RESUME; |
665 | return retval; | 715 | return retval; |
666 | } else if (retval) { | ||
667 | return retval; | ||
668 | } | 716 | } |
717 | if (retval) | ||
718 | return retval; | ||
669 | 719 | ||
670 | dev->power.request = RPM_REQ_RESUME; | 720 | dev->power.request = RPM_REQ_RESUME; |
671 | dev->power.request_pending = true; | 721 | dev->power.request_pending = true; |
@@ -696,15 +746,15 @@ EXPORT_SYMBOL_GPL(pm_request_resume);
696 | * @dev: Device to handle. | 746 | * @dev: Device to handle. |
697 | * @sync: If set and the device is suspended, resume it synchronously. | 747 | * @sync: If set and the device is suspended, resume it synchronously. |
698 | * | 748 | * |
699 | * Increment the usage count of the device and if it was zero previously, | 749 | * Increment the usage count of the device and resume it or submit a resume |
700 | * resume it or submit a resume request for it, depending on the value of @sync. | 750 | * request for it, depending on the value of @sync. |
701 | */ | 751 | */ |
702 | int __pm_runtime_get(struct device *dev, bool sync) | 752 | int __pm_runtime_get(struct device *dev, bool sync) |
703 | { | 753 | { |
704 | int retval = 1; | 754 | int retval; |
705 | 755 | ||
706 | if (atomic_add_return(1, &dev->power.usage_count) == 1) | 756 | atomic_inc(&dev->power.usage_count); |
707 | retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev); | 757 | retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev); |
708 | 758 | ||
709 | return retval; | 759 | return retval; |
710 | } | 760 | } |
@@ -777,7 +827,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
777 | } | 827 | } |
778 | 828 | ||
779 | if (parent) { | 829 | if (parent) { |
780 | spin_lock(&parent->power.lock); | 830 | spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING); |
781 | 831 | ||
782 | /* | 832 | /* |
783 | * It is invalid to put an active child under a parent that is | 833 | * It is invalid to put an active child under a parent that is |
@@ -786,12 +836,10 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
786 | */ | 836 | */ |
787 | if (!parent->power.disable_depth | 837 | if (!parent->power.disable_depth |
788 | && !parent->power.ignore_children | 838 | && !parent->power.ignore_children |
789 | && parent->power.runtime_status != RPM_ACTIVE) { | 839 | && parent->power.runtime_status != RPM_ACTIVE) |
790 | error = -EBUSY; | 840 | error = -EBUSY; |
791 | } else { | 841 | else if (dev->power.runtime_status == RPM_SUSPENDED) |
792 | if (dev->power.runtime_status == RPM_SUSPENDED) | 842 | atomic_inc(&parent->power.child_count); |
793 | atomic_inc(&parent->power.child_count); | ||
794 | } | ||
795 | 843 | ||
796 | spin_unlock(&parent->power.lock); | 844 | spin_unlock(&parent->power.lock); |
797 | 845 | ||
@@ -963,6 +1011,50 @@ void pm_runtime_enable(struct device *dev)
963 | EXPORT_SYMBOL_GPL(pm_runtime_enable); | 1011 | EXPORT_SYMBOL_GPL(pm_runtime_enable); |
964 | 1012 | ||
965 | /** | 1013 | /** |
1014 | * pm_runtime_forbid - Block run-time PM of a device. | ||
1015 | * @dev: Device to handle. | ||
1016 | * | ||
1017 | * Increase the device's usage count and clear its power.runtime_auto flag, | ||
1018 | * so that it cannot be suspended at run time until pm_runtime_allow() is called | ||
1019 | * for it. | ||
1020 | */ | ||
1021 | void pm_runtime_forbid(struct device *dev) | ||
1022 | { | ||
1023 | spin_lock_irq(&dev->power.lock); | ||
1024 | if (!dev->power.runtime_auto) | ||
1025 | goto out; | ||
1026 | |||
1027 | dev->power.runtime_auto = false; | ||
1028 | atomic_inc(&dev->power.usage_count); | ||
1029 | __pm_runtime_resume(dev, false); | ||
1030 | |||
1031 | out: | ||
1032 | spin_unlock_irq(&dev->power.lock); | ||
1033 | } | ||
1034 | EXPORT_SYMBOL_GPL(pm_runtime_forbid); | ||
1035 | |||
1036 | /** | ||
1037 | * pm_runtime_allow - Unblock run-time PM of a device. | ||
1038 | * @dev: Device to handle. | ||
1039 | * | ||
1040 | * Decrease the device's usage count and set its power.runtime_auto flag. | ||
1041 | */ | ||
1042 | void pm_runtime_allow(struct device *dev) | ||
1043 | { | ||
1044 | spin_lock_irq(&dev->power.lock); | ||
1045 | if (dev->power.runtime_auto) | ||
1046 | goto out; | ||
1047 | |||
1048 | dev->power.runtime_auto = true; | ||
1049 | if (atomic_dec_and_test(&dev->power.usage_count)) | ||
1050 | __pm_runtime_idle(dev); | ||
1051 | |||
1052 | out: | ||
1053 | spin_unlock_irq(&dev->power.lock); | ||
1054 | } | ||
1055 | EXPORT_SYMBOL_GPL(pm_runtime_allow); | ||
1056 | |||
1057 | /** | ||
966 | * pm_runtime_init - Initialize run-time PM fields in given device object. | 1058 | * pm_runtime_init - Initialize run-time PM fields in given device object. |
967 | * @dev: Device object to initialize. | 1059 | * @dev: Device object to initialize. |
968 | */ | 1060 | */ |
@@ -980,6 +1072,7 @@ void pm_runtime_init(struct device *dev)
980 | 1072 | ||
981 | atomic_set(&dev->power.child_count, 0); | 1073 | atomic_set(&dev->power.child_count, 0); |
982 | pm_suspend_ignore_children(dev, false); | 1074 | pm_suspend_ignore_children(dev, false); |
1075 | dev->power.runtime_auto = true; | ||
983 | 1076 | ||
984 | dev->power.request_pending = false; | 1077 | dev->power.request_pending = false; |
985 | dev->power.request = RPM_REQ_NONE; | 1078 | dev->power.request = RPM_REQ_NONE; |
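
With the runtime.c changes above, the core now falls back to dev->type->pm and then to dev->class->pm when the bus type does not provide a given runtime PM callback (bus still takes precedence over type, and type over class). A sketch of a device class supplying its own callbacks; the "example" names are hypothetical:

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int example_class_runtime_suspend(struct device *dev)
{
	/* Class-specific power-down would go here. */
	return 0;
}

static int example_class_runtime_resume(struct device *dev)
{
	/* Class-specific power-up would go here. */
	return 0;
}

static const struct dev_pm_ops example_class_pm_ops = {
	.runtime_suspend = example_class_runtime_suspend,
	.runtime_resume	 = example_class_runtime_resume,
};

static struct class example_class = {
	.name	= "example",
	.pm	= &example_class_pm_ops,
};
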
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 596aeecfdffe..86fd9373447e 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -4,9 +4,25 @@
4 | 4 | ||
5 | #include <linux/device.h> | 5 | #include <linux/device.h> |
6 | #include <linux/string.h> | 6 | #include <linux/string.h> |
7 | #include <linux/pm_runtime.h> | ||
7 | #include "power.h" | 8 | #include "power.h" |
8 | 9 | ||
9 | /* | 10 | /* |
11 | * control - Report/change current runtime PM setting of the device | ||
12 | * | ||
13 | * Runtime power management of a device can be blocked with the help of | ||
14 | * this attribute. All devices have one of the following two values for | ||
15 | * the power/control file: | ||
16 | * | ||
17 | * + "auto\n" to allow the device to be power managed at run time; | ||
18 | * + "on\n" to prevent the device from being power managed at run time; | ||
19 | * | ||
20 | * The default for all devices is "auto", which means that devices may be | ||
21 | * subject to automatic power management, depending on their drivers. | ||
22 | * Changing this attribute to "on" prevents the driver from power managing | ||
23 | * the device at run time. Doing that while the device is suspended causes | ||
24 | * it to be woken up. | ||
25 | * | ||
10 | * wakeup - Report/change current wakeup option for device | 26 | * wakeup - Report/change current wakeup option for device |
11 | * | 27 | * |
12 | * Some devices support "wakeup" events, which are hardware signals | 28 | * Some devices support "wakeup" events, which are hardware signals |
@@ -38,11 +54,61 @@
38 | * wakeup events internally (unless they are disabled), keeping | 54 | * wakeup events internally (unless they are disabled), keeping |
39 | * their hardware in low power modes whenever they're unused. This | 55 | * their hardware in low power modes whenever they're unused. This |
40 | * saves runtime power, without requiring system-wide sleep states. | 56 | * saves runtime power, without requiring system-wide sleep states. |
57 | * | ||
58 | * async - Report/change current async suspend setting for the device | ||
59 | * | ||
60 | * Asynchronous suspend and resume of the device during system-wide power | ||
61 | * state transitions can be enabled by writing "enabled" to this file. | ||
62 | * Analogously, if "disabled" is written to this file, the device will be | ||
63 | * suspended and resumed synchronously. | ||
64 | * | ||
65 | * All devices have one of the following two values for power/async: | ||
66 | * | ||
67 | * + "enabled\n" to permit the asynchronous suspend/resume of the device; | ||
68 | * + "disabled\n" to forbid it; | ||
69 | * | ||
70 | * NOTE: It generally is unsafe to permit the asynchronous suspend/resume | ||
71 | * of a device unless it is certain that all of the PM dependencies of the | ||
72 | * device are known to the PM core. However, for some devices this | ||
73 | * attribute is set to "enabled" by bus type code or device drivers and in | ||
74 | * those cases it should be safe to leave the default value. | ||
41 | */ | 75 | */ |
42 | 76 | ||
43 | static const char enabled[] = "enabled"; | 77 | static const char enabled[] = "enabled"; |
44 | static const char disabled[] = "disabled"; | 78 | static const char disabled[] = "disabled"; |
45 | 79 | ||
80 | #ifdef CONFIG_PM_RUNTIME | ||
81 | static const char ctrl_auto[] = "auto"; | ||
82 | static const char ctrl_on[] = "on"; | ||
83 | |||
84 | static ssize_t control_show(struct device *dev, struct device_attribute *attr, | ||
85 | char *buf) | ||
86 | { | ||
87 | return sprintf(buf, "%s\n", | ||
88 | dev->power.runtime_auto ? ctrl_auto : ctrl_on); | ||
89 | } | ||
90 | |||
91 | static ssize_t control_store(struct device * dev, struct device_attribute *attr, | ||
92 | const char * buf, size_t n) | ||
93 | { | ||
94 | char *cp; | ||
95 | int len = n; | ||
96 | |||
97 | cp = memchr(buf, '\n', n); | ||
98 | if (cp) | ||
99 | len = cp - buf; | ||
100 | if (len == sizeof ctrl_auto - 1 && strncmp(buf, ctrl_auto, len) == 0) | ||
101 | pm_runtime_allow(dev); | ||
102 | else if (len == sizeof ctrl_on - 1 && strncmp(buf, ctrl_on, len) == 0) | ||
103 | pm_runtime_forbid(dev); | ||
104 | else | ||
105 | return -EINVAL; | ||
106 | return n; | ||
107 | } | ||
108 | |||
109 | static DEVICE_ATTR(control, 0644, control_show, control_store); | ||
110 | #endif | ||
111 | |||
46 | static ssize_t | 112 | static ssize_t |
47 | wake_show(struct device * dev, struct device_attribute *attr, char * buf) | 113 | wake_show(struct device * dev, struct device_attribute *attr, char * buf) |
48 | { | 114 | { |
@@ -77,9 +143,43 @@ wake_store(struct device * dev, struct device_attribute *attr,
77 | 143 | ||
78 | static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store); | 144 | static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store); |
79 | 145 | ||
146 | #ifdef CONFIG_PM_SLEEP_ADVANCED_DEBUG | ||
147 | static ssize_t async_show(struct device *dev, struct device_attribute *attr, | ||
148 | char *buf) | ||
149 | { | ||
150 | return sprintf(buf, "%s\n", | ||
151 | device_async_suspend_enabled(dev) ? enabled : disabled); | ||
152 | } | ||
153 | |||
154 | static ssize_t async_store(struct device *dev, struct device_attribute *attr, | ||
155 | const char *buf, size_t n) | ||
156 | { | ||
157 | char *cp; | ||
158 | int len = n; | ||
159 | |||
160 | cp = memchr(buf, '\n', n); | ||
161 | if (cp) | ||
162 | len = cp - buf; | ||
163 | if (len == sizeof enabled - 1 && strncmp(buf, enabled, len) == 0) | ||
164 | device_enable_async_suspend(dev); | ||
165 | else if (len == sizeof disabled - 1 && strncmp(buf, disabled, len) == 0) | ||
166 | device_disable_async_suspend(dev); | ||
167 | else | ||
168 | return -EINVAL; | ||
169 | return n; | ||
170 | } | ||
171 | |||
172 | static DEVICE_ATTR(async, 0644, async_show, async_store); | ||
173 | #endif /* CONFIG_PM_SLEEP_ADVANCED_DEBUG */ | ||
80 | 174 | ||
81 | static struct attribute * power_attrs[] = { | 175 | static struct attribute * power_attrs[] = { |
176 | #ifdef CONFIG_PM_RUNTIME | ||
177 | &dev_attr_control.attr, | ||
178 | #endif | ||
82 | &dev_attr_wakeup.attr, | 179 | &dev_attr_wakeup.attr, |
180 | #ifdef CONFIG_PM_SLEEP_ADVANCED_DEBUG | ||
181 | &dev_attr_async.attr, | ||
182 | #endif | ||
83 | NULL, | 183 | NULL, |
84 | }; | 184 | }; |
85 | static struct attribute_group pm_attr_group = { | 185 | static struct attribute_group pm_attr_group = { |
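
The new power/control attribute maps "auto" onto pm_runtime_allow() and "on" onto pm_runtime_forbid(). A driver can therefore ship a conservative default and let userspace opt in later; a minimal hypothetical sketch:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int example_probe(struct device *dev)
{
	/*
	 * Block runtime PM until userspace writes "auto" to the device's
	 * power/control file (which ends up calling pm_runtime_allow()).
	 * Until then, control_show() above reports "on".
	 */
	pm_runtime_forbid(dev);
	return 0;
}
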