author     Linus Torvalds <torvalds@linux-foundation.org>  2011-01-10 11:14:53 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-01-10 11:14:53 -0500
commit     c8940eca75e6d1ea57f6c491a30bd1023c64c9ad
tree       d68944ab9fa8ba3c77b18edc2bd836c7e355b23e
parent     78c92a9fd4b6abbbc1fe1ec335c697cb4e63f252
parent     3ae22e8c8ac39daf88ae32f047fb23825be7c646
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6:
spi / PM: Support dev_pm_ops
PM: Prototype the pm_generic_ operations
PM / Runtime: Generic resume shouldn't set RPM_ACTIVE unconditionally
PM: Use dev_name() in core device suspend and resume routines
PM: Permit registration of parentless devices during system suspend
PM: Replace the device power.status field with a bit field
PM: Remove redundant checks from core device resume routines
PM: Use a different list of devices for each stage of device suspend
PM: Avoid compiler warning in pm_noirq_op()
PM: Use pm_wakeup_pending() in __device_suspend()
PM / Wakeup: Replace pm_check_wakeup_events() with pm_wakeup_pending()
PM: Prevent dpm_prepare() from returning errors unnecessarily
PM: Fix references to basic-pm-debugging.txt in drivers-testing.txt
PM / Runtime: Add synchronous runtime interface for interrupt handlers (v3)
PM / Hibernate: When failed, in_suspend should be reset
PM / Hibernate: hibernation_ops->leave should be checked too
Freezer: Fix a race during freezing of TASK_STOPPED tasks
PM: Use proper ccflag flag in kernel/power/Makefile
PM / Runtime: Fix comments to match runtime callback code
-rw-r--r--  Documentation/power/drivers-testing.txt  |   8
-rw-r--r--  Documentation/power/runtime_pm.txt        |  31
-rw-r--r--  drivers/base/power/generic_ops.c          |   6
-rw-r--r--  drivers/base/power/main.c                 | 174
-rw-r--r--  drivers/base/power/runtime.c              |  47
-rw-r--r--  drivers/base/power/wakeup.c               |  20
-rw-r--r--  drivers/spi/spi.c                         |  92
-rw-r--r--  drivers/usb/core/driver.c                 |   7
-rw-r--r--  include/linux/device.h                    |   4
-rw-r--r--  include/linux/pm.h                        |  51
-rw-r--r--  include/linux/pm_runtime.h                |  13
-rw-r--r--  include/linux/suspend.h                   |   4
-rw-r--r--  kernel/freezer.c                          |   9
-rw-r--r--  kernel/power/Makefile                     |   5
-rw-r--r--  kernel/power/hibernate.c                  |   7
-rw-r--r--  kernel/power/process.c                    |   8
-rw-r--r--  kernel/power/suspend.c                    |   2
17 files changed, 282 insertions, 206 deletions
diff --git a/Documentation/power/drivers-testing.txt b/Documentation/power/drivers-testing.txt
index 7f7a737f7f9f..638afdf4d6b8 100644
--- a/Documentation/power/drivers-testing.txt
+++ b/Documentation/power/drivers-testing.txt
@@ -23,10 +23,10 @@ Once you have resolved the suspend/resume-related problems with your test system | |||
23 | without the new driver, you are ready to test it: | 23 | without the new driver, you are ready to test it: |
24 | 24 | ||
25 | a) Build the driver as a module, load it and try the test modes of hibernation | 25 | a) Build the driver as a module, load it and try the test modes of hibernation |
26 | (see: Documents/power/basic-pm-debugging.txt, 1). | 26 | (see: Documentation/power/basic-pm-debugging.txt, 1). |
27 | 27 | ||
28 | b) Load the driver and attempt to hibernate in the "reboot", "shutdown" and | 28 | b) Load the driver and attempt to hibernate in the "reboot", "shutdown" and |
29 | "platform" modes (see: Documents/power/basic-pm-debugging.txt, 1). | 29 | "platform" modes (see: Documentation/power/basic-pm-debugging.txt, 1). |
30 | 30 | ||
31 | c) Compile the driver directly into the kernel and try the test modes of | 31 | c) Compile the driver directly into the kernel and try the test modes of |
32 | hibernation. | 32 | hibernation. |
@@ -34,12 +34,12 @@ c) Compile the driver directly into the kernel and try the test modes of | |||
34 | d) Attempt to hibernate with the driver compiled directly into the kernel | 34 | d) Attempt to hibernate with the driver compiled directly into the kernel |
35 | in the "reboot", "shutdown" and "platform" modes. | 35 | in the "reboot", "shutdown" and "platform" modes. |
36 | 36 | ||
37 | e) Try the test modes of suspend (see: Documents/power/basic-pm-debugging.txt, | 37 | e) Try the test modes of suspend (see: Documentation/power/basic-pm-debugging.txt, |
38 | 2). [As far as the STR tests are concerned, it should not matter whether or | 38 | 2). [As far as the STR tests are concerned, it should not matter whether or |
39 | not the driver is built as a module.] | 39 | not the driver is built as a module.] |
40 | 40 | ||
41 | f) Attempt to suspend to RAM using the s2ram tool with the driver loaded | 41 | f) Attempt to suspend to RAM using the s2ram tool with the driver loaded |
42 | (see: Documents/power/basic-pm-debugging.txt, 2). | 42 | (see: Documentation/power/basic-pm-debugging.txt, 2). |
43 | 43 | ||
44 | Each of the above tests should be repeated several times and the STD tests | 44 | Each of the above tests should be repeated several times and the STD tests |
45 | should be mixed with the STR tests. If any of them fails, the driver cannot be | 45 | should be mixed with the STR tests. If any of them fails, the driver cannot be |
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index 41cc7b30d7dd..ffe55ffa540a 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -50,6 +50,15 @@ type's callbacks are not defined) of given device. The bus type, device type | |||
50 | and device class callbacks are referred to as subsystem-level callbacks in what | 50 | and device class callbacks are referred to as subsystem-level callbacks in what |
51 | follows. | 51 | follows. |
52 | 52 | ||
53 | By default, the callbacks are always invoked in process context with interrupts | ||
54 | enabled. However, subsystems can use the pm_runtime_irq_safe() helper function | ||
55 | to tell the PM core that a device's ->runtime_suspend() and ->runtime_resume() | ||
56 | callbacks should be invoked in atomic context with interrupts disabled | ||
57 | (->runtime_idle() is still invoked the default way). This implies that these | ||
58 | callback routines must not block or sleep, but it also means that the | ||
59 | synchronous helper functions listed at the end of Section 4 can be used within | ||
60 | an interrupt handler or in an atomic context. | ||
61 | |||
53 | The subsystem-level suspend callback is _entirely_ _responsible_ for handling | 62 | The subsystem-level suspend callback is _entirely_ _responsible_ for handling |
54 | the suspend of the device as appropriate, which may, but need not include | 63 | the suspend of the device as appropriate, which may, but need not include |
55 | executing the device driver's own ->runtime_suspend() callback (from the | 64 | executing the device driver's own ->runtime_suspend() callback (from the |
@@ -237,6 +246,10 @@ defined in include/linux/pm.h: | |||
237 | Section 8); it may be modified only by the pm_runtime_no_callbacks() | 246 | Section 8); it may be modified only by the pm_runtime_no_callbacks() |
238 | helper function | 247 | helper function |
239 | 248 | ||
249 | unsigned int irq_safe; | ||
250 | - indicates that the ->runtime_suspend() and ->runtime_resume() callbacks | ||
251 | will be invoked with the spinlock held and interrupts disabled | ||
252 | |||
240 | unsigned int use_autosuspend; | 253 | unsigned int use_autosuspend; |
241 | - indicates that the device's driver supports delayed autosuspend (see | 254 | - indicates that the device's driver supports delayed autosuspend (see |
242 | Section 9); it may be modified only by the | 255 | Section 9); it may be modified only by the |
@@ -344,6 +357,10 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h: | |||
344 | - decrement the device's usage counter; if the result is 0 then run | 357 | - decrement the device's usage counter; if the result is 0 then run |
345 | pm_runtime_idle(dev) and return its result | 358 | pm_runtime_idle(dev) and return its result |
346 | 359 | ||
360 | int pm_runtime_put_sync_suspend(struct device *dev); | ||
361 | - decrement the device's usage counter; if the result is 0 then run | ||
362 | pm_runtime_suspend(dev) and return its result | ||
363 | |||
347 | int pm_runtime_put_sync_autosuspend(struct device *dev); | 364 | int pm_runtime_put_sync_autosuspend(struct device *dev); |
348 | - decrement the device's usage counter; if the result is 0 then run | 365 | - decrement the device's usage counter; if the result is 0 then run |
349 | pm_runtime_autosuspend(dev) and return its result | 366 | pm_runtime_autosuspend(dev) and return its result |
@@ -397,6 +414,11 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h: | |||
397 | PM attributes from /sys/devices/.../power (or prevent them from being | 414 | PM attributes from /sys/devices/.../power (or prevent them from being |
398 | added when the device is registered) | 415 | added when the device is registered) |
399 | 416 | ||
417 | void pm_runtime_irq_safe(struct device *dev); | ||
418 | - set the power.irq_safe flag for the device, causing the runtime-PM | ||
419 | suspend and resume callbacks (but not the idle callback) to be invoked | ||
420 | with interrupts disabled | ||
421 | |||
400 | void pm_runtime_mark_last_busy(struct device *dev); | 422 | void pm_runtime_mark_last_busy(struct device *dev); |
401 | - set the power.last_busy field to the current time | 423 | - set the power.last_busy field to the current time |
402 | 424 | ||
@@ -438,6 +460,15 @@ pm_runtime_suspended() | |||
438 | pm_runtime_mark_last_busy() | 460 | pm_runtime_mark_last_busy() |
439 | pm_runtime_autosuspend_expiration() | 461 | pm_runtime_autosuspend_expiration() |
440 | 462 | ||
463 | If pm_runtime_irq_safe() has been called for a device then the following helper | ||
464 | functions may also be used in interrupt context: | ||
465 | |||
466 | pm_runtime_suspend() | ||
467 | pm_runtime_autosuspend() | ||
468 | pm_runtime_resume() | ||
469 | pm_runtime_get_sync() | ||
470 | pm_runtime_put_sync_suspend() | ||
471 | |||
441 | 5. Run-time PM Initialization, Device Probing and Removal | 472 | 5. Run-time PM Initialization, Device Probing and Removal |
442 | 473 | ||
443 | Initially, the run-time PM is disabled for all devices, which means that the | 474 | Initially, the run-time PM is disabled for all devices, which means that the |
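
Illustration (not part of the merged diff): a minimal, hypothetical driver sketch of the interrupt-safe interface documented above — the foo_* names and the platform bus are assumptions. The device is marked irq-safe at probe time; afterwards the synchronous helpers listed at the end of Section 4 may legally be called from its interrupt handler.

#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct platform_device *pdev)
{
	/*
	 * ->runtime_suspend() and ->runtime_resume() will now be run with
	 * interrupts disabled, so they must not sleep.
	 */
	pm_runtime_irq_safe(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	return 0;
}

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	struct device *dev = dev_id;

	pm_runtime_get_sync(dev);		/* allowed in atomic context here */
	/* ... access the hardware ... */
	pm_runtime_put_sync_suspend(dev);
	return IRQ_HANDLED;
}
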
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 81f2c84697f4..42f97f925629 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -39,7 +39,7 @@ EXPORT_SYMBOL_GPL(pm_generic_runtime_idle); | |||
39 | * | 39 | * |
40 | * If PM operations are defined for the @dev's driver and they include | 40 | * If PM operations are defined for the @dev's driver and they include |
41 | * ->runtime_suspend(), execute it and return its error code. Otherwise, | 41 | * ->runtime_suspend(), execute it and return its error code. Otherwise, |
42 | * return -EINVAL. | 42 | * return 0. |
43 | */ | 43 | */ |
44 | int pm_generic_runtime_suspend(struct device *dev) | 44 | int pm_generic_runtime_suspend(struct device *dev) |
45 | { | 45 | { |
@@ -58,7 +58,7 @@ EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend); | |||
58 | * | 58 | * |
59 | * If PM operations are defined for the @dev's driver and they include | 59 | * If PM operations are defined for the @dev's driver and they include |
60 | * ->runtime_resume(), execute it and return its error code. Otherwise, | 60 | * ->runtime_resume(), execute it and return its error code. Otherwise, |
61 | * return -EINVAL. | 61 | * return 0. |
62 | */ | 62 | */ |
63 | int pm_generic_runtime_resume(struct device *dev) | 63 | int pm_generic_runtime_resume(struct device *dev) |
64 | { | 64 | { |
@@ -185,7 +185,7 @@ static int __pm_generic_resume(struct device *dev, int event) | |||
185 | return 0; | 185 | return 0; |
186 | 186 | ||
187 | ret = callback(dev); | 187 | ret = callback(dev); |
188 | if (!ret) { | 188 | if (!ret && pm_runtime_enabled(dev)) { |
189 | pm_runtime_disable(dev); | 189 | pm_runtime_disable(dev); |
190 | pm_runtime_set_active(dev); | 190 | pm_runtime_set_active(dev); |
191 | pm_runtime_enable(dev); | 191 | pm_runtime_enable(dev); |
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index ead3e79d6fcf..2a52270aeb30 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/interrupt.h> | 26 | #include <linux/interrupt.h> |
27 | #include <linux/sched.h> | 27 | #include <linux/sched.h> |
28 | #include <linux/async.h> | 28 | #include <linux/async.h> |
29 | #include <linux/suspend.h> | ||
29 | 30 | ||
30 | #include "../base.h" | 31 | #include "../base.h" |
31 | #include "power.h" | 32 | #include "power.h" |
@@ -41,16 +42,13 @@ | |||
41 | */ | 42 | */ |
42 | 43 | ||
43 | LIST_HEAD(dpm_list); | 44 | LIST_HEAD(dpm_list); |
45 | LIST_HEAD(dpm_prepared_list); | ||
46 | LIST_HEAD(dpm_suspended_list); | ||
47 | LIST_HEAD(dpm_noirq_list); | ||
44 | 48 | ||
45 | static DEFINE_MUTEX(dpm_list_mtx); | 49 | static DEFINE_MUTEX(dpm_list_mtx); |
46 | static pm_message_t pm_transition; | 50 | static pm_message_t pm_transition; |
47 | 51 | ||
48 | /* | ||
49 | * Set once the preparation of devices for a PM transition has started, reset | ||
50 | * before starting to resume devices. Protected by dpm_list_mtx. | ||
51 | */ | ||
52 | static bool transition_started; | ||
53 | |||
54 | static int async_error; | 52 | static int async_error; |
55 | 53 | ||
56 | /** | 54 | /** |
@@ -59,7 +57,7 @@ static int async_error; | |||
59 | */ | 57 | */ |
60 | void device_pm_init(struct device *dev) | 58 | void device_pm_init(struct device *dev) |
61 | { | 59 | { |
62 | dev->power.status = DPM_ON; | 60 | dev->power.in_suspend = false; |
63 | init_completion(&dev->power.completion); | 61 | init_completion(&dev->power.completion); |
64 | complete_all(&dev->power.completion); | 62 | complete_all(&dev->power.completion); |
65 | dev->power.wakeup = NULL; | 63 | dev->power.wakeup = NULL; |
@@ -90,22 +88,11 @@ void device_pm_unlock(void) | |||
90 | void device_pm_add(struct device *dev) | 88 | void device_pm_add(struct device *dev) |
91 | { | 89 | { |
92 | pr_debug("PM: Adding info for %s:%s\n", | 90 | pr_debug("PM: Adding info for %s:%s\n", |
93 | dev->bus ? dev->bus->name : "No Bus", | 91 | dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); |
94 | kobject_name(&dev->kobj)); | ||
95 | mutex_lock(&dpm_list_mtx); | 92 | mutex_lock(&dpm_list_mtx); |
96 | if (dev->parent) { | 93 | if (dev->parent && dev->parent->power.in_suspend) |
97 | if (dev->parent->power.status >= DPM_SUSPENDING) | 94 | dev_warn(dev, "parent %s should not be sleeping\n", |
98 | dev_warn(dev, "parent %s should not be sleeping\n", | 95 | dev_name(dev->parent)); |
99 | dev_name(dev->parent)); | ||
100 | } else if (transition_started) { | ||
101 | /* | ||
102 | * We refuse to register parentless devices while a PM | ||
103 | * transition is in progress in order to avoid leaving them | ||
104 | * unhandled down the road | ||
105 | */ | ||
106 | dev_WARN(dev, "Parentless device registered during a PM transaction\n"); | ||
107 | } | ||
108 | |||
109 | list_add_tail(&dev->power.entry, &dpm_list); | 96 | list_add_tail(&dev->power.entry, &dpm_list); |
110 | mutex_unlock(&dpm_list_mtx); | 97 | mutex_unlock(&dpm_list_mtx); |
111 | } | 98 | } |
@@ -117,8 +104,7 @@ void device_pm_add(struct device *dev) | |||
117 | void device_pm_remove(struct device *dev) | 104 | void device_pm_remove(struct device *dev) |
118 | { | 105 | { |
119 | pr_debug("PM: Removing info for %s:%s\n", | 106 | pr_debug("PM: Removing info for %s:%s\n", |
120 | dev->bus ? dev->bus->name : "No Bus", | 107 | dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); |
121 | kobject_name(&dev->kobj)); | ||
122 | complete_all(&dev->power.completion); | 108 | complete_all(&dev->power.completion); |
123 | mutex_lock(&dpm_list_mtx); | 109 | mutex_lock(&dpm_list_mtx); |
124 | list_del_init(&dev->power.entry); | 110 | list_del_init(&dev->power.entry); |
@@ -135,10 +121,8 @@ void device_pm_remove(struct device *dev) | |||
135 | void device_pm_move_before(struct device *deva, struct device *devb) | 121 | void device_pm_move_before(struct device *deva, struct device *devb) |
136 | { | 122 | { |
137 | pr_debug("PM: Moving %s:%s before %s:%s\n", | 123 | pr_debug("PM: Moving %s:%s before %s:%s\n", |
138 | deva->bus ? deva->bus->name : "No Bus", | 124 | deva->bus ? deva->bus->name : "No Bus", dev_name(deva), |
139 | kobject_name(&deva->kobj), | 125 | devb->bus ? devb->bus->name : "No Bus", dev_name(devb)); |
140 | devb->bus ? devb->bus->name : "No Bus", | ||
141 | kobject_name(&devb->kobj)); | ||
142 | /* Delete deva from dpm_list and reinsert before devb. */ | 126 | /* Delete deva from dpm_list and reinsert before devb. */ |
143 | list_move_tail(&deva->power.entry, &devb->power.entry); | 127 | list_move_tail(&deva->power.entry, &devb->power.entry); |
144 | } | 128 | } |
@@ -151,10 +135,8 @@ void device_pm_move_before(struct device *deva, struct device *devb) | |||
151 | void device_pm_move_after(struct device *deva, struct device *devb) | 135 | void device_pm_move_after(struct device *deva, struct device *devb) |
152 | { | 136 | { |
153 | pr_debug("PM: Moving %s:%s after %s:%s\n", | 137 | pr_debug("PM: Moving %s:%s after %s:%s\n", |
154 | deva->bus ? deva->bus->name : "No Bus", | 138 | deva->bus ? deva->bus->name : "No Bus", dev_name(deva), |
155 | kobject_name(&deva->kobj), | 139 | devb->bus ? devb->bus->name : "No Bus", dev_name(devb)); |
156 | devb->bus ? devb->bus->name : "No Bus", | ||
157 | kobject_name(&devb->kobj)); | ||
158 | /* Delete deva from dpm_list and reinsert after devb. */ | 140 | /* Delete deva from dpm_list and reinsert after devb. */ |
159 | list_move(&deva->power.entry, &devb->power.entry); | 141 | list_move(&deva->power.entry, &devb->power.entry); |
160 | } | 142 | } |
@@ -166,8 +148,7 @@ void device_pm_move_after(struct device *deva, struct device *devb) | |||
166 | void device_pm_move_last(struct device *dev) | 148 | void device_pm_move_last(struct device *dev) |
167 | { | 149 | { |
168 | pr_debug("PM: Moving %s:%s to end of list\n", | 150 | pr_debug("PM: Moving %s:%s to end of list\n", |
169 | dev->bus ? dev->bus->name : "No Bus", | 151 | dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); |
170 | kobject_name(&dev->kobj)); | ||
171 | list_move_tail(&dev->power.entry, &dpm_list); | 152 | list_move_tail(&dev->power.entry, &dpm_list); |
172 | } | 153 | } |
173 | 154 | ||
@@ -303,7 +284,7 @@ static int pm_noirq_op(struct device *dev, | |||
303 | pm_message_t state) | 284 | pm_message_t state) |
304 | { | 285 | { |
305 | int error = 0; | 286 | int error = 0; |
306 | ktime_t calltime, delta, rettime; | 287 | ktime_t calltime = ktime_set(0, 0), delta, rettime; |
307 | 288 | ||
308 | if (initcall_debug) { | 289 | if (initcall_debug) { |
309 | pr_info("calling %s+ @ %i, parent: %s\n", | 290 | pr_info("calling %s+ @ %i, parent: %s\n", |
@@ -405,7 +386,7 @@ static void pm_dev_err(struct device *dev, pm_message_t state, char *info, | |||
405 | int error) | 386 | int error) |
406 | { | 387 | { |
407 | printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n", | 388 | printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n", |
408 | kobject_name(&dev->kobj), pm_verb(state.event), info, error); | 389 | dev_name(dev), pm_verb(state.event), info, error); |
409 | } | 390 | } |
410 | 391 | ||
411 | static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info) | 392 | static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info) |
@@ -475,33 +456,24 @@ End: | |||
475 | */ | 456 | */ |
476 | void dpm_resume_noirq(pm_message_t state) | 457 | void dpm_resume_noirq(pm_message_t state) |
477 | { | 458 | { |
478 | struct list_head list; | ||
479 | ktime_t starttime = ktime_get(); | 459 | ktime_t starttime = ktime_get(); |
480 | 460 | ||
481 | INIT_LIST_HEAD(&list); | ||
482 | mutex_lock(&dpm_list_mtx); | 461 | mutex_lock(&dpm_list_mtx); |
483 | transition_started = false; | 462 | while (!list_empty(&dpm_noirq_list)) { |
484 | while (!list_empty(&dpm_list)) { | 463 | struct device *dev = to_device(dpm_noirq_list.next); |
485 | struct device *dev = to_device(dpm_list.next); | 464 | int error; |
486 | 465 | ||
487 | get_device(dev); | 466 | get_device(dev); |
488 | if (dev->power.status > DPM_OFF) { | 467 | list_move_tail(&dev->power.entry, &dpm_suspended_list); |
489 | int error; | 468 | mutex_unlock(&dpm_list_mtx); |
490 | |||
491 | dev->power.status = DPM_OFF; | ||
492 | mutex_unlock(&dpm_list_mtx); | ||
493 | 469 | ||
494 | error = device_resume_noirq(dev, state); | 470 | error = device_resume_noirq(dev, state); |
471 | if (error) | ||
472 | pm_dev_err(dev, state, " early", error); | ||
495 | 473 | ||
496 | mutex_lock(&dpm_list_mtx); | 474 | mutex_lock(&dpm_list_mtx); |
497 | if (error) | ||
498 | pm_dev_err(dev, state, " early", error); | ||
499 | } | ||
500 | if (!list_empty(&dev->power.entry)) | ||
501 | list_move_tail(&dev->power.entry, &list); | ||
502 | put_device(dev); | 475 | put_device(dev); |
503 | } | 476 | } |
504 | list_splice(&list, &dpm_list); | ||
505 | mutex_unlock(&dpm_list_mtx); | 477 | mutex_unlock(&dpm_list_mtx); |
506 | dpm_show_time(starttime, state, "early"); | 478 | dpm_show_time(starttime, state, "early"); |
507 | resume_device_irqs(); | 479 | resume_device_irqs(); |
@@ -544,7 +516,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) | |||
544 | dpm_wait(dev->parent, async); | 516 | dpm_wait(dev->parent, async); |
545 | device_lock(dev); | 517 | device_lock(dev); |
546 | 518 | ||
547 | dev->power.status = DPM_RESUMING; | 519 | dev->power.in_suspend = false; |
548 | 520 | ||
549 | if (dev->bus) { | 521 | if (dev->bus) { |
550 | if (dev->bus->pm) { | 522 | if (dev->bus->pm) { |
@@ -610,19 +582,14 @@ static bool is_async(struct device *dev) | |||
610 | */ | 582 | */ |
611 | static void dpm_resume(pm_message_t state) | 583 | static void dpm_resume(pm_message_t state) |
612 | { | 584 | { |
613 | struct list_head list; | ||
614 | struct device *dev; | 585 | struct device *dev; |
615 | ktime_t starttime = ktime_get(); | 586 | ktime_t starttime = ktime_get(); |
616 | 587 | ||
617 | INIT_LIST_HEAD(&list); | ||
618 | mutex_lock(&dpm_list_mtx); | 588 | mutex_lock(&dpm_list_mtx); |
619 | pm_transition = state; | 589 | pm_transition = state; |
620 | async_error = 0; | 590 | async_error = 0; |
621 | 591 | ||
622 | list_for_each_entry(dev, &dpm_list, power.entry) { | 592 | list_for_each_entry(dev, &dpm_suspended_list, power.entry) { |
623 | if (dev->power.status < DPM_OFF) | ||
624 | continue; | ||
625 | |||
626 | INIT_COMPLETION(dev->power.completion); | 593 | INIT_COMPLETION(dev->power.completion); |
627 | if (is_async(dev)) { | 594 | if (is_async(dev)) { |
628 | get_device(dev); | 595 | get_device(dev); |
@@ -630,28 +597,24 @@ static void dpm_resume(pm_message_t state) | |||
630 | } | 597 | } |
631 | } | 598 | } |
632 | 599 | ||
633 | while (!list_empty(&dpm_list)) { | 600 | while (!list_empty(&dpm_suspended_list)) { |
634 | dev = to_device(dpm_list.next); | 601 | dev = to_device(dpm_suspended_list.next); |
635 | get_device(dev); | 602 | get_device(dev); |
636 | if (dev->power.status >= DPM_OFF && !is_async(dev)) { | 603 | if (!is_async(dev)) { |
637 | int error; | 604 | int error; |
638 | 605 | ||
639 | mutex_unlock(&dpm_list_mtx); | 606 | mutex_unlock(&dpm_list_mtx); |
640 | 607 | ||
641 | error = device_resume(dev, state, false); | 608 | error = device_resume(dev, state, false); |
642 | |||
643 | mutex_lock(&dpm_list_mtx); | ||
644 | if (error) | 609 | if (error) |
645 | pm_dev_err(dev, state, "", error); | 610 | pm_dev_err(dev, state, "", error); |
646 | } else if (dev->power.status == DPM_SUSPENDING) { | 611 | |
647 | /* Allow new children of the device to be registered */ | 612 | mutex_lock(&dpm_list_mtx); |
648 | dev->power.status = DPM_RESUMING; | ||
649 | } | 613 | } |
650 | if (!list_empty(&dev->power.entry)) | 614 | if (!list_empty(&dev->power.entry)) |
651 | list_move_tail(&dev->power.entry, &list); | 615 | list_move_tail(&dev->power.entry, &dpm_prepared_list); |
652 | put_device(dev); | 616 | put_device(dev); |
653 | } | 617 | } |
654 | list_splice(&list, &dpm_list); | ||
655 | mutex_unlock(&dpm_list_mtx); | 618 | mutex_unlock(&dpm_list_mtx); |
656 | async_synchronize_full(); | 619 | async_synchronize_full(); |
657 | dpm_show_time(starttime, state, NULL); | 620 | dpm_show_time(starttime, state, NULL); |
@@ -697,22 +660,18 @@ static void dpm_complete(pm_message_t state) | |||
697 | 660 | ||
698 | INIT_LIST_HEAD(&list); | 661 | INIT_LIST_HEAD(&list); |
699 | mutex_lock(&dpm_list_mtx); | 662 | mutex_lock(&dpm_list_mtx); |
700 | transition_started = false; | 663 | while (!list_empty(&dpm_prepared_list)) { |
701 | while (!list_empty(&dpm_list)) { | 664 | struct device *dev = to_device(dpm_prepared_list.prev); |
702 | struct device *dev = to_device(dpm_list.prev); | ||
703 | 665 | ||
704 | get_device(dev); | 666 | get_device(dev); |
705 | if (dev->power.status > DPM_ON) { | 667 | dev->power.in_suspend = false; |
706 | dev->power.status = DPM_ON; | 668 | list_move(&dev->power.entry, &list); |
707 | mutex_unlock(&dpm_list_mtx); | 669 | mutex_unlock(&dpm_list_mtx); |
708 | 670 | ||
709 | device_complete(dev, state); | 671 | device_complete(dev, state); |
710 | pm_runtime_put_sync(dev); | 672 | pm_runtime_put_sync(dev); |
711 | 673 | ||
712 | mutex_lock(&dpm_list_mtx); | 674 | mutex_lock(&dpm_list_mtx); |
713 | } | ||
714 | if (!list_empty(&dev->power.entry)) | ||
715 | list_move(&dev->power.entry, &list); | ||
716 | put_device(dev); | 675 | put_device(dev); |
717 | } | 676 | } |
718 | list_splice(&list, &dpm_list); | 677 | list_splice(&list, &dpm_list); |
@@ -802,15 +761,13 @@ End: | |||
802 | */ | 761 | */ |
803 | int dpm_suspend_noirq(pm_message_t state) | 762 | int dpm_suspend_noirq(pm_message_t state) |
804 | { | 763 | { |
805 | struct list_head list; | ||
806 | ktime_t starttime = ktime_get(); | 764 | ktime_t starttime = ktime_get(); |
807 | int error = 0; | 765 | int error = 0; |
808 | 766 | ||
809 | INIT_LIST_HEAD(&list); | ||
810 | suspend_device_irqs(); | 767 | suspend_device_irqs(); |
811 | mutex_lock(&dpm_list_mtx); | 768 | mutex_lock(&dpm_list_mtx); |
812 | while (!list_empty(&dpm_list)) { | 769 | while (!list_empty(&dpm_suspended_list)) { |
813 | struct device *dev = to_device(dpm_list.prev); | 770 | struct device *dev = to_device(dpm_suspended_list.prev); |
814 | 771 | ||
815 | get_device(dev); | 772 | get_device(dev); |
816 | mutex_unlock(&dpm_list_mtx); | 773 | mutex_unlock(&dpm_list_mtx); |
@@ -823,12 +780,10 @@ int dpm_suspend_noirq(pm_message_t state) | |||
823 | put_device(dev); | 780 | put_device(dev); |
824 | break; | 781 | break; |
825 | } | 782 | } |
826 | dev->power.status = DPM_OFF_IRQ; | ||
827 | if (!list_empty(&dev->power.entry)) | 783 | if (!list_empty(&dev->power.entry)) |
828 | list_move(&dev->power.entry, &list); | 784 | list_move(&dev->power.entry, &dpm_noirq_list); |
829 | put_device(dev); | 785 | put_device(dev); |
830 | } | 786 | } |
831 | list_splice_tail(&list, &dpm_list); | ||
832 | mutex_unlock(&dpm_list_mtx); | 787 | mutex_unlock(&dpm_list_mtx); |
833 | if (error) | 788 | if (error) |
834 | dpm_resume_noirq(resume_event(state)); | 789 | dpm_resume_noirq(resume_event(state)); |
@@ -876,6 +831,11 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) | |||
876 | if (async_error) | 831 | if (async_error) |
877 | goto End; | 832 | goto End; |
878 | 833 | ||
834 | if (pm_wakeup_pending()) { | ||
835 | async_error = -EBUSY; | ||
836 | goto End; | ||
837 | } | ||
838 | |||
879 | if (dev->class) { | 839 | if (dev->class) { |
880 | if (dev->class->pm) { | 840 | if (dev->class->pm) { |
881 | pm_dev_dbg(dev, state, "class "); | 841 | pm_dev_dbg(dev, state, "class "); |
@@ -907,9 +867,6 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) | |||
907 | } | 867 | } |
908 | } | 868 | } |
909 | 869 | ||
910 | if (!error) | ||
911 | dev->power.status = DPM_OFF; | ||
912 | |||
913 | End: | 870 | End: |
914 | device_unlock(dev); | 871 | device_unlock(dev); |
915 | complete_all(&dev->power.completion); | 872 | complete_all(&dev->power.completion); |
@@ -951,16 +908,14 @@ static int device_suspend(struct device *dev) | |||
951 | */ | 908 | */ |
952 | static int dpm_suspend(pm_message_t state) | 909 | static int dpm_suspend(pm_message_t state) |
953 | { | 910 | { |
954 | struct list_head list; | ||
955 | ktime_t starttime = ktime_get(); | 911 | ktime_t starttime = ktime_get(); |
956 | int error = 0; | 912 | int error = 0; |
957 | 913 | ||
958 | INIT_LIST_HEAD(&list); | ||
959 | mutex_lock(&dpm_list_mtx); | 914 | mutex_lock(&dpm_list_mtx); |
960 | pm_transition = state; | 915 | pm_transition = state; |
961 | async_error = 0; | 916 | async_error = 0; |
962 | while (!list_empty(&dpm_list)) { | 917 | while (!list_empty(&dpm_prepared_list)) { |
963 | struct device *dev = to_device(dpm_list.prev); | 918 | struct device *dev = to_device(dpm_prepared_list.prev); |
964 | 919 | ||
965 | get_device(dev); | 920 | get_device(dev); |
966 | mutex_unlock(&dpm_list_mtx); | 921 | mutex_unlock(&dpm_list_mtx); |
@@ -974,12 +929,11 @@ static int dpm_suspend(pm_message_t state) | |||
974 | break; | 929 | break; |
975 | } | 930 | } |
976 | if (!list_empty(&dev->power.entry)) | 931 | if (!list_empty(&dev->power.entry)) |
977 | list_move(&dev->power.entry, &list); | 932 | list_move(&dev->power.entry, &dpm_suspended_list); |
978 | put_device(dev); | 933 | put_device(dev); |
979 | if (async_error) | 934 | if (async_error) |
980 | break; | 935 | break; |
981 | } | 936 | } |
982 | list_splice(&list, dpm_list.prev); | ||
983 | mutex_unlock(&dpm_list_mtx); | 937 | mutex_unlock(&dpm_list_mtx); |
984 | async_synchronize_full(); | 938 | async_synchronize_full(); |
985 | if (!error) | 939 | if (!error) |
@@ -1038,22 +992,20 @@ static int device_prepare(struct device *dev, pm_message_t state) | |||
1038 | */ | 992 | */ |
1039 | static int dpm_prepare(pm_message_t state) | 993 | static int dpm_prepare(pm_message_t state) |
1040 | { | 994 | { |
1041 | struct list_head list; | ||
1042 | int error = 0; | 995 | int error = 0; |
1043 | 996 | ||
1044 | INIT_LIST_HEAD(&list); | ||
1045 | mutex_lock(&dpm_list_mtx); | 997 | mutex_lock(&dpm_list_mtx); |
1046 | transition_started = true; | ||
1047 | while (!list_empty(&dpm_list)) { | 998 | while (!list_empty(&dpm_list)) { |
1048 | struct device *dev = to_device(dpm_list.next); | 999 | struct device *dev = to_device(dpm_list.next); |
1049 | 1000 | ||
1050 | get_device(dev); | 1001 | get_device(dev); |
1051 | dev->power.status = DPM_PREPARING; | ||
1052 | mutex_unlock(&dpm_list_mtx); | 1002 | mutex_unlock(&dpm_list_mtx); |
1053 | 1003 | ||
1054 | pm_runtime_get_noresume(dev); | 1004 | pm_runtime_get_noresume(dev); |
1055 | if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) { | 1005 | if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) |
1056 | /* Wake-up requested during system sleep transition. */ | 1006 | pm_wakeup_event(dev, 0); |
1007 | |||
1008 | if (pm_wakeup_pending()) { | ||
1057 | pm_runtime_put_sync(dev); | 1009 | pm_runtime_put_sync(dev); |
1058 | error = -EBUSY; | 1010 | error = -EBUSY; |
1059 | } else { | 1011 | } else { |
@@ -1062,24 +1014,22 @@ static int dpm_prepare(pm_message_t state) | |||
1062 | 1014 | ||
1063 | mutex_lock(&dpm_list_mtx); | 1015 | mutex_lock(&dpm_list_mtx); |
1064 | if (error) { | 1016 | if (error) { |
1065 | dev->power.status = DPM_ON; | ||
1066 | if (error == -EAGAIN) { | 1017 | if (error == -EAGAIN) { |
1067 | put_device(dev); | 1018 | put_device(dev); |
1068 | error = 0; | 1019 | error = 0; |
1069 | continue; | 1020 | continue; |
1070 | } | 1021 | } |
1071 | printk(KERN_ERR "PM: Failed to prepare device %s " | 1022 | printk(KERN_INFO "PM: Device %s not prepared " |
1072 | "for power transition: error %d\n", | 1023 | "for power transition: code %d\n", |
1073 | kobject_name(&dev->kobj), error); | 1024 | dev_name(dev), error); |
1074 | put_device(dev); | 1025 | put_device(dev); |
1075 | break; | 1026 | break; |
1076 | } | 1027 | } |
1077 | dev->power.status = DPM_SUSPENDING; | 1028 | dev->power.in_suspend = true; |
1078 | if (!list_empty(&dev->power.entry)) | 1029 | if (!list_empty(&dev->power.entry)) |
1079 | list_move_tail(&dev->power.entry, &list); | 1030 | list_move_tail(&dev->power.entry, &dpm_prepared_list); |
1080 | put_device(dev); | 1031 | put_device(dev); |
1081 | } | 1032 | } |
1082 | list_splice(&list, &dpm_list); | ||
1083 | mutex_unlock(&dpm_list_mtx); | 1033 | mutex_unlock(&dpm_list_mtx); |
1084 | return error; | 1034 | return error; |
1085 | } | 1035 | } |
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 02c652be83e7..656493a5e073 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -250,13 +250,16 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev) | |||
250 | if (!cb) | 250 | if (!cb) |
251 | return -ENOSYS; | 251 | return -ENOSYS; |
252 | 252 | ||
253 | spin_unlock_irq(&dev->power.lock); | 253 | if (dev->power.irq_safe) { |
254 | retval = cb(dev); | ||
255 | } else { | ||
256 | spin_unlock_irq(&dev->power.lock); | ||
254 | 257 | ||
255 | retval = cb(dev); | 258 | retval = cb(dev); |
256 | 259 | ||
257 | spin_lock_irq(&dev->power.lock); | 260 | spin_lock_irq(&dev->power.lock); |
261 | } | ||
258 | dev->power.runtime_error = retval; | 262 | dev->power.runtime_error = retval; |
259 | |||
260 | return retval; | 263 | return retval; |
261 | } | 264 | } |
262 | 265 | ||
@@ -404,7 +407,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) | |||
404 | goto out; | 407 | goto out; |
405 | } | 408 | } |
406 | 409 | ||
407 | if (parent && !parent->power.ignore_children) { | 410 | if (parent && !parent->power.ignore_children && !dev->power.irq_safe) { |
408 | spin_unlock_irq(&dev->power.lock); | 411 | spin_unlock_irq(&dev->power.lock); |
409 | 412 | ||
410 | pm_request_idle(parent); | 413 | pm_request_idle(parent); |
@@ -527,10 +530,13 @@ static int rpm_resume(struct device *dev, int rpmflags) | |||
527 | 530 | ||
528 | if (!parent && dev->parent) { | 531 | if (!parent && dev->parent) { |
529 | /* | 532 | /* |
530 | * Increment the parent's resume counter and resume it if | 533 | * Increment the parent's usage counter and resume it if |
531 | * necessary. | 534 | * necessary. Not needed if dev is irq-safe; then the |
535 | * parent is permanently resumed. | ||
532 | */ | 536 | */ |
533 | parent = dev->parent; | 537 | parent = dev->parent; |
538 | if (dev->power.irq_safe) | ||
539 | goto skip_parent; | ||
534 | spin_unlock(&dev->power.lock); | 540 | spin_unlock(&dev->power.lock); |
535 | 541 | ||
536 | pm_runtime_get_noresume(parent); | 542 | pm_runtime_get_noresume(parent); |
@@ -553,6 +559,7 @@ static int rpm_resume(struct device *dev, int rpmflags) | |||
553 | goto out; | 559 | goto out; |
554 | goto repeat; | 560 | goto repeat; |
555 | } | 561 | } |
562 | skip_parent: | ||
556 | 563 | ||
557 | if (dev->power.no_callbacks) | 564 | if (dev->power.no_callbacks) |
558 | goto no_callback; /* Assume success. */ | 565 | goto no_callback; /* Assume success. */ |
@@ -584,7 +591,7 @@ static int rpm_resume(struct device *dev, int rpmflags) | |||
584 | rpm_idle(dev, RPM_ASYNC); | 591 | rpm_idle(dev, RPM_ASYNC); |
585 | 592 | ||
586 | out: | 593 | out: |
587 | if (parent) { | 594 | if (parent && !dev->power.irq_safe) { |
588 | spin_unlock_irq(&dev->power.lock); | 595 | spin_unlock_irq(&dev->power.lock); |
589 | 596 | ||
590 | pm_runtime_put(parent); | 597 | pm_runtime_put(parent); |
@@ -1065,7 +1072,6 @@ EXPORT_SYMBOL_GPL(pm_runtime_allow); | |||
1065 | * Set the power.no_callbacks flag, which tells the PM core that this | 1072 | * Set the power.no_callbacks flag, which tells the PM core that this |
1066 | * device is power-managed through its parent and has no run-time PM | 1073 | * device is power-managed through its parent and has no run-time PM |
1067 | * callbacks of its own. The run-time sysfs attributes will be removed. | 1074 | * callbacks of its own. The run-time sysfs attributes will be removed. |
1068 | * | ||
1069 | */ | 1075 | */ |
1070 | void pm_runtime_no_callbacks(struct device *dev) | 1076 | void pm_runtime_no_callbacks(struct device *dev) |
1071 | { | 1077 | { |
@@ -1078,6 +1084,27 @@ void pm_runtime_no_callbacks(struct device *dev) | |||
1078 | EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks); | 1084 | EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks); |
1079 | 1085 | ||
1080 | /** | 1086 | /** |
1087 | * pm_runtime_irq_safe - Leave interrupts disabled during callbacks. | ||
1088 | * @dev: Device to handle | ||
1089 | * | ||
1090 | * Set the power.irq_safe flag, which tells the PM core that the | ||
1091 | * ->runtime_suspend() and ->runtime_resume() callbacks for this device should | ||
1092 | * always be invoked with the spinlock held and interrupts disabled. It also | ||
1093 | * causes the parent's usage counter to be permanently incremented, preventing | ||
1094 | * the parent from runtime suspending -- otherwise an irq-safe child might have | ||
1095 | * to wait for a non-irq-safe parent. | ||
1096 | */ | ||
1097 | void pm_runtime_irq_safe(struct device *dev) | ||
1098 | { | ||
1099 | if (dev->parent) | ||
1100 | pm_runtime_get_sync(dev->parent); | ||
1101 | spin_lock_irq(&dev->power.lock); | ||
1102 | dev->power.irq_safe = 1; | ||
1103 | spin_unlock_irq(&dev->power.lock); | ||
1104 | } | ||
1105 | EXPORT_SYMBOL_GPL(pm_runtime_irq_safe); | ||
1106 | |||
1107 | /** | ||
1081 | * update_autosuspend - Handle a change to a device's autosuspend settings. | 1108 | * update_autosuspend - Handle a change to a device's autosuspend settings. |
1082 | * @dev: Device to handle. | 1109 | * @dev: Device to handle. |
1083 | * @old_delay: The former autosuspend_delay value. | 1110 | * @old_delay: The former autosuspend_delay value. |
@@ -1199,4 +1226,6 @@ void pm_runtime_remove(struct device *dev) | |||
1199 | /* Change the status back to 'suspended' to match the initial status. */ | 1226 | /* Change the status back to 'suspended' to match the initial status. */ |
1200 | if (dev->power.runtime_status == RPM_ACTIVE) | 1227 | if (dev->power.runtime_status == RPM_ACTIVE) |
1201 | pm_runtime_set_suspended(dev); | 1228 | pm_runtime_set_suspended(dev); |
1229 | if (dev->power.irq_safe && dev->parent) | ||
1230 | pm_runtime_put_sync(dev->parent); | ||
1202 | } | 1231 | } |
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 71c5528e1c35..8ec406d8f548 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -542,26 +542,26 @@ static void pm_wakeup_update_hit_counts(void) | |||
542 | } | 542 | } |
543 | 543 | ||
544 | /** | 544 | /** |
545 | * pm_check_wakeup_events - Check for new wakeup events. | 545 | * pm_wakeup_pending - Check if power transition in progress should be aborted. |
546 | * | 546 | * |
547 | * Compare the current number of registered wakeup events with its preserved | 547 | * Compare the current number of registered wakeup events with its preserved |
548 | * value from the past to check if new wakeup events have been registered since | 548 | * value from the past and return true if new wakeup events have been registered |
549 | * the old value was stored. Check if the current number of wakeup events being | 549 | * since the old value was stored. Also return true if the current number of |
550 | * processed is zero. | 550 | * wakeup events being processed is different from zero. |
551 | */ | 551 | */ |
552 | bool pm_check_wakeup_events(void) | 552 | bool pm_wakeup_pending(void) |
553 | { | 553 | { |
554 | unsigned long flags; | 554 | unsigned long flags; |
555 | bool ret = true; | 555 | bool ret = false; |
556 | 556 | ||
557 | spin_lock_irqsave(&events_lock, flags); | 557 | spin_lock_irqsave(&events_lock, flags); |
558 | if (events_check_enabled) { | 558 | if (events_check_enabled) { |
559 | ret = ((unsigned int)atomic_read(&event_count) == saved_count) | 559 | ret = ((unsigned int)atomic_read(&event_count) != saved_count) |
560 | && !atomic_read(&events_in_progress); | 560 | || atomic_read(&events_in_progress); |
561 | events_check_enabled = ret; | 561 | events_check_enabled = !ret; |
562 | } | 562 | } |
563 | spin_unlock_irqrestore(&events_lock, flags); | 563 | spin_unlock_irqrestore(&events_lock, flags); |
564 | if (!ret) | 564 | if (ret) |
565 | pm_wakeup_update_hit_counts(); | 565 | pm_wakeup_update_hit_counts(); |
566 | return ret; | 566 | return ret; |
567 | } | 567 | } |
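
Illustration (not part of the merged diff): the rename also inverts the return value, so callers that used to abort a transition when pm_check_wakeup_events() returned false now abort when pm_wakeup_pending() returns true, as the hibernate, process-freezing and suspend hunks below show. Schematically:

	if (!pm_check_wakeup_events())		/* old */
		goto Power_up;

	if (pm_wakeup_pending())		/* new */
		goto Power_up;
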
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index b02d0cbce890..34bb17f03019 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/mod_devicetable.h> | 28 | #include <linux/mod_devicetable.h> |
29 | #include <linux/spi/spi.h> | 29 | #include <linux/spi/spi.h> |
30 | #include <linux/of_spi.h> | 30 | #include <linux/of_spi.h> |
31 | #include <linux/pm_runtime.h> | ||
31 | 32 | ||
32 | static void spidev_release(struct device *dev) | 33 | static void spidev_release(struct device *dev) |
33 | { | 34 | { |
@@ -100,9 +101,8 @@ static int spi_uevent(struct device *dev, struct kobj_uevent_env *env) | |||
100 | return 0; | 101 | return 0; |
101 | } | 102 | } |
102 | 103 | ||
103 | #ifdef CONFIG_PM | 104 | #ifdef CONFIG_PM_SLEEP |
104 | 105 | static int spi_legacy_suspend(struct device *dev, pm_message_t message) | |
105 | static int spi_suspend(struct device *dev, pm_message_t message) | ||
106 | { | 106 | { |
107 | int value = 0; | 107 | int value = 0; |
108 | struct spi_driver *drv = to_spi_driver(dev->driver); | 108 | struct spi_driver *drv = to_spi_driver(dev->driver); |
@@ -117,7 +117,7 @@ static int spi_suspend(struct device *dev, pm_message_t message) | |||
117 | return value; | 117 | return value; |
118 | } | 118 | } |
119 | 119 | ||
120 | static int spi_resume(struct device *dev) | 120 | static int spi_legacy_resume(struct device *dev) |
121 | { | 121 | { |
122 | int value = 0; | 122 | int value = 0; |
123 | struct spi_driver *drv = to_spi_driver(dev->driver); | 123 | struct spi_driver *drv = to_spi_driver(dev->driver); |
@@ -132,18 +132,94 @@ static int spi_resume(struct device *dev) | |||
132 | return value; | 132 | return value; |
133 | } | 133 | } |
134 | 134 | ||
135 | static int spi_pm_suspend(struct device *dev) | ||
136 | { | ||
137 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
138 | |||
139 | if (pm) | ||
140 | return pm_generic_suspend(dev); | ||
141 | else | ||
142 | return spi_legacy_suspend(dev, PMSG_SUSPEND); | ||
143 | } | ||
144 | |||
145 | static int spi_pm_resume(struct device *dev) | ||
146 | { | ||
147 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
148 | |||
149 | if (pm) | ||
150 | return pm_generic_resume(dev); | ||
151 | else | ||
152 | return spi_legacy_resume(dev); | ||
153 | } | ||
154 | |||
155 | static int spi_pm_freeze(struct device *dev) | ||
156 | { | ||
157 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
158 | |||
159 | if (pm) | ||
160 | return pm_generic_freeze(dev); | ||
161 | else | ||
162 | return spi_legacy_suspend(dev, PMSG_FREEZE); | ||
163 | } | ||
164 | |||
165 | static int spi_pm_thaw(struct device *dev) | ||
166 | { | ||
167 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
168 | |||
169 | if (pm) | ||
170 | return pm_generic_thaw(dev); | ||
171 | else | ||
172 | return spi_legacy_resume(dev); | ||
173 | } | ||
174 | |||
175 | static int spi_pm_poweroff(struct device *dev) | ||
176 | { | ||
177 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
178 | |||
179 | if (pm) | ||
180 | return pm_generic_poweroff(dev); | ||
181 | else | ||
182 | return spi_legacy_suspend(dev, PMSG_HIBERNATE); | ||
183 | } | ||
184 | |||
185 | static int spi_pm_restore(struct device *dev) | ||
186 | { | ||
187 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
188 | |||
189 | if (pm) | ||
190 | return pm_generic_restore(dev); | ||
191 | else | ||
192 | return spi_legacy_resume(dev); | ||
193 | } | ||
135 | #else | 194 | #else |
136 | #define spi_suspend NULL | 195 | #define spi_pm_suspend NULL |
137 | #define spi_resume NULL | 196 | #define spi_pm_resume NULL |
197 | #define spi_pm_freeze NULL | ||
198 | #define spi_pm_thaw NULL | ||
199 | #define spi_pm_poweroff NULL | ||
200 | #define spi_pm_restore NULL | ||
138 | #endif | 201 | #endif |
139 | 202 | ||
203 | static const struct dev_pm_ops spi_pm = { | ||
204 | .suspend = spi_pm_suspend, | ||
205 | .resume = spi_pm_resume, | ||
206 | .freeze = spi_pm_freeze, | ||
207 | .thaw = spi_pm_thaw, | ||
208 | .poweroff = spi_pm_poweroff, | ||
209 | .restore = spi_pm_restore, | ||
210 | SET_RUNTIME_PM_OPS( | ||
211 | pm_generic_runtime_suspend, | ||
212 | pm_generic_runtime_resume, | ||
213 | pm_generic_runtime_idle | ||
214 | ) | ||
215 | }; | ||
216 | |||
140 | struct bus_type spi_bus_type = { | 217 | struct bus_type spi_bus_type = { |
141 | .name = "spi", | 218 | .name = "spi", |
142 | .dev_attrs = spi_dev_attrs, | 219 | .dev_attrs = spi_dev_attrs, |
143 | .match = spi_match_device, | 220 | .match = spi_match_device, |
144 | .uevent = spi_uevent, | 221 | .uevent = spi_uevent, |
145 | .suspend = spi_suspend, | 222 | .pm = &spi_pm, |
146 | .resume = spi_resume, | ||
147 | }; | 223 | }; |
148 | EXPORT_SYMBOL_GPL(spi_bus_type); | 224 | EXPORT_SYMBOL_GPL(spi_bus_type); |
149 | 225 | ||
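
Illustration (not part of the merged diff): with the bus now registering dev_pm_ops, an SPI protocol driver can supply its own pm table and have spi_pm_suspend()/spi_pm_resume() dispatch to it through the pm_generic_* helpers, while drivers still implementing the legacy suspend()/resume() methods keep working. A hypothetical driver sketch (the bar_* names are assumptions; registration is omitted):

#include <linux/module.h>
#include <linux/pm.h>
#include <linux/spi/spi.h>

static int bar_suspend(struct device *dev)
{
	/* quiesce the chip; reached via spi_pm_suspend() -> pm_generic_suspend() */
	return 0;
}

static int bar_resume(struct device *dev)
{
	/* reinitialize the chip */
	return 0;
}

static const struct dev_pm_ops bar_pm_ops = {
	.suspend = bar_suspend,
	.resume  = bar_resume,
};

static int bar_probe(struct spi_device *spi)  { return 0; }
static int bar_remove(struct spi_device *spi) { return 0; }

static struct spi_driver bar_driver = {
	.driver = {
		.name  = "bar",
		.owner = THIS_MODULE,
		.pm    = &bar_pm_ops,
	},
	.probe  = bar_probe,
	.remove = bar_remove,
};
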
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index b9278a1fb9e5..fca61720b873 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -375,7 +375,7 @@ static int usb_unbind_interface(struct device *dev) | |||
375 | * Just re-enable it without affecting the endpoint toggles. | 375 | * Just re-enable it without affecting the endpoint toggles. |
376 | */ | 376 | */ |
377 | usb_enable_interface(udev, intf, false); | 377 | usb_enable_interface(udev, intf, false); |
378 | } else if (!error && intf->dev.power.status == DPM_ON) { | 378 | } else if (!error && !intf->dev.power.in_suspend) { |
379 | r = usb_set_interface(udev, intf->altsetting[0]. | 379 | r = usb_set_interface(udev, intf->altsetting[0]. |
380 | desc.bInterfaceNumber, 0); | 380 | desc.bInterfaceNumber, 0); |
381 | if (r < 0) | 381 | if (r < 0) |
@@ -960,7 +960,7 @@ void usb_rebind_intf(struct usb_interface *intf) | |||
960 | } | 960 | } |
961 | 961 | ||
962 | /* Try to rebind the interface */ | 962 | /* Try to rebind the interface */ |
963 | if (intf->dev.power.status == DPM_ON) { | 963 | if (!intf->dev.power.in_suspend) { |
964 | intf->needs_binding = 0; | 964 | intf->needs_binding = 0; |
965 | rc = device_attach(&intf->dev); | 965 | rc = device_attach(&intf->dev); |
966 | if (rc < 0) | 966 | if (rc < 0) |
@@ -1107,8 +1107,7 @@ static int usb_resume_interface(struct usb_device *udev, | |||
1107 | if (intf->condition == USB_INTERFACE_UNBOUND) { | 1107 | if (intf->condition == USB_INTERFACE_UNBOUND) { |
1108 | 1108 | ||
1109 | /* Carry out a deferred switch to altsetting 0 */ | 1109 | /* Carry out a deferred switch to altsetting 0 */ |
1110 | if (intf->needs_altsetting0 && | 1110 | if (intf->needs_altsetting0 && !intf->dev.power.in_suspend) { |
1111 | intf->dev.power.status == DPM_ON) { | ||
1112 | usb_set_interface(udev, intf->altsetting[0]. | 1111 | usb_set_interface(udev, intf->altsetting[0]. |
1113 | desc.bInterfaceNumber, 0); | 1112 | desc.bInterfaceNumber, 0); |
1114 | intf->needs_altsetting0 = 0; | 1113 | intf->needs_altsetting0 = 0; |
diff --git a/include/linux/device.h b/include/linux/device.h
index dd4895313468..45bc8c1669d2 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -508,13 +508,13 @@ static inline int device_is_registered(struct device *dev) | |||
508 | 508 | ||
509 | static inline void device_enable_async_suspend(struct device *dev) | 509 | static inline void device_enable_async_suspend(struct device *dev) |
510 | { | 510 | { |
511 | if (dev->power.status == DPM_ON) | 511 | if (!dev->power.in_suspend) |
512 | dev->power.async_suspend = true; | 512 | dev->power.async_suspend = true; |
513 | } | 513 | } |
514 | 514 | ||
515 | static inline void device_disable_async_suspend(struct device *dev) | 515 | static inline void device_disable_async_suspend(struct device *dev) |
516 | { | 516 | { |
517 | if (dev->power.status == DPM_ON) | 517 | if (!dev->power.in_suspend) |
518 | dev->power.async_suspend = false; | 518 | dev->power.async_suspend = false; |
519 | } | 519 | } |
520 | 520 | ||
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 40f3f45702ba..dd9c7ab38270 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -367,45 +367,6 @@ extern struct dev_pm_ops generic_subsys_pm_ops; | |||
367 | { .event = PM_EVENT_AUTO_RESUME, }) | 367 | { .event = PM_EVENT_AUTO_RESUME, }) |
368 | 368 | ||
369 | /** | 369 | /** |
370 | * Device power management states | ||
371 | * | ||
372 | * These state labels are used internally by the PM core to indicate the current | ||
373 | * status of a device with respect to the PM core operations. | ||
374 | * | ||
375 | * DPM_ON Device is regarded as operational. Set this way | ||
376 | * initially and when ->complete() is about to be called. | ||
377 | * Also set when ->prepare() fails. | ||
378 | * | ||
379 | * DPM_PREPARING Device is going to be prepared for a PM transition. Set | ||
380 | * when ->prepare() is about to be called. | ||
381 | * | ||
382 | * DPM_RESUMING Device is going to be resumed. Set when ->resume(), | ||
383 | * ->thaw(), or ->restore() is about to be called. | ||
384 | * | ||
385 | * DPM_SUSPENDING Device has been prepared for a power transition. Set | ||
386 | * when ->prepare() has just succeeded. | ||
387 | * | ||
388 | * DPM_OFF Device is regarded as inactive. Set immediately after | ||
389 | * ->suspend(), ->freeze(), or ->poweroff() has succeeded. | ||
390 | * Also set when ->resume()_noirq, ->thaw_noirq(), or | ||
391 | * ->restore_noirq() is about to be called. | ||
392 | * | ||
393 | * DPM_OFF_IRQ Device is in a "deep sleep". Set immediately after | ||
394 | * ->suspend_noirq(), ->freeze_noirq(), or | ||
395 | * ->poweroff_noirq() has just succeeded. | ||
396 | */ | ||
397 | |||
398 | enum dpm_state { | ||
399 | DPM_INVALID, | ||
400 | DPM_ON, | ||
401 | DPM_PREPARING, | ||
402 | DPM_RESUMING, | ||
403 | DPM_SUSPENDING, | ||
404 | DPM_OFF, | ||
405 | DPM_OFF_IRQ, | ||
406 | }; | ||
407 | |||
408 | /** | ||
409 | * Device run-time power management status. | 370 | * Device run-time power management status. |
410 | * | 371 | * |
411 | * These status labels are used internally by the PM core to indicate the | 372 | * These status labels are used internally by the PM core to indicate the |
@@ -463,8 +424,8 @@ struct wakeup_source; | |||
463 | struct dev_pm_info { | 424 | struct dev_pm_info { |
464 | pm_message_t power_state; | 425 | pm_message_t power_state; |
465 | unsigned int can_wakeup:1; | 426 | unsigned int can_wakeup:1; |
466 | unsigned async_suspend:1; | 427 | unsigned int async_suspend:1; |
467 | enum dpm_state status; /* Owned by the PM core */ | 428 | unsigned int in_suspend:1; /* Owned by the PM core */ |
468 | spinlock_t lock; | 429 | spinlock_t lock; |
469 | #ifdef CONFIG_PM_SLEEP | 430 | #ifdef CONFIG_PM_SLEEP |
470 | struct list_head entry; | 431 | struct list_head entry; |
@@ -486,6 +447,7 @@ struct dev_pm_info { | |||
486 | unsigned int run_wake:1; | 447 | unsigned int run_wake:1; |
487 | unsigned int runtime_auto:1; | 448 | unsigned int runtime_auto:1; |
488 | unsigned int no_callbacks:1; | 449 | unsigned int no_callbacks:1; |
450 | unsigned int irq_safe:1; | ||
489 | unsigned int use_autosuspend:1; | 451 | unsigned int use_autosuspend:1; |
490 | unsigned int timer_autosuspends:1; | 452 | unsigned int timer_autosuspends:1; |
491 | enum rpm_request request; | 453 | enum rpm_request request; |
@@ -610,4 +572,11 @@ extern unsigned int pm_flags; | |||
610 | #define PM_APM 1 | 572 | #define PM_APM 1 |
611 | #define PM_ACPI 2 | 573 | #define PM_ACPI 2 |
612 | 574 | ||
575 | extern int pm_generic_suspend(struct device *dev); | ||
576 | extern int pm_generic_resume(struct device *dev); | ||
577 | extern int pm_generic_freeze(struct device *dev); | ||
578 | extern int pm_generic_thaw(struct device *dev); | ||
579 | extern int pm_generic_restore(struct device *dev); | ||
580 | extern int pm_generic_poweroff(struct device *dev); | ||
581 | |||
613 | #endif /* _LINUX_PM_H */ | 582 | #endif /* _LINUX_PM_H */ |
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index d19f1cca7f74..d34f067e2a7f 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -40,6 +40,7 @@ extern int pm_generic_runtime_idle(struct device *dev); | |||
40 | extern int pm_generic_runtime_suspend(struct device *dev); | 40 | extern int pm_generic_runtime_suspend(struct device *dev); |
41 | extern int pm_generic_runtime_resume(struct device *dev); | 41 | extern int pm_generic_runtime_resume(struct device *dev); |
42 | extern void pm_runtime_no_callbacks(struct device *dev); | 42 | extern void pm_runtime_no_callbacks(struct device *dev); |
43 | extern void pm_runtime_irq_safe(struct device *dev); | ||
43 | extern void __pm_runtime_use_autosuspend(struct device *dev, bool use); | 44 | extern void __pm_runtime_use_autosuspend(struct device *dev, bool use); |
44 | extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay); | 45 | extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay); |
45 | extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev); | 46 | extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev); |
@@ -81,6 +82,11 @@ static inline bool pm_runtime_suspended(struct device *dev) | |||
81 | && !dev->power.disable_depth; | 82 | && !dev->power.disable_depth; |
82 | } | 83 | } |
83 | 84 | ||
85 | static inline bool pm_runtime_enabled(struct device *dev) | ||
86 | { | ||
87 | return !dev->power.disable_depth; | ||
88 | } | ||
89 | |||
84 | static inline void pm_runtime_mark_last_busy(struct device *dev) | 90 | static inline void pm_runtime_mark_last_busy(struct device *dev) |
85 | { | 91 | { |
86 | ACCESS_ONCE(dev->power.last_busy) = jiffies; | 92 | ACCESS_ONCE(dev->power.last_busy) = jiffies; |
@@ -119,11 +125,13 @@ static inline void pm_runtime_put_noidle(struct device *dev) {} | |||
119 | static inline bool device_run_wake(struct device *dev) { return false; } | 125 | static inline bool device_run_wake(struct device *dev) { return false; } |
120 | static inline void device_set_run_wake(struct device *dev, bool enable) {} | 126 | static inline void device_set_run_wake(struct device *dev, bool enable) {} |
121 | static inline bool pm_runtime_suspended(struct device *dev) { return false; } | 127 | static inline bool pm_runtime_suspended(struct device *dev) { return false; } |
128 | static inline bool pm_runtime_enabled(struct device *dev) { return false; } | ||
122 | 129 | ||
123 | static inline int pm_generic_runtime_idle(struct device *dev) { return 0; } | 130 | static inline int pm_generic_runtime_idle(struct device *dev) { return 0; } |
124 | static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; } | 131 | static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; } |
125 | static inline int pm_generic_runtime_resume(struct device *dev) { return 0; } | 132 | static inline int pm_generic_runtime_resume(struct device *dev) { return 0; } |
126 | static inline void pm_runtime_no_callbacks(struct device *dev) {} | 133 | static inline void pm_runtime_no_callbacks(struct device *dev) {} |
134 | static inline void pm_runtime_irq_safe(struct device *dev) {} | ||
127 | 135 | ||
128 | static inline void pm_runtime_mark_last_busy(struct device *dev) {} | 136 | static inline void pm_runtime_mark_last_busy(struct device *dev) {} |
129 | static inline void __pm_runtime_use_autosuspend(struct device *dev, | 137 | static inline void __pm_runtime_use_autosuspend(struct device *dev, |
@@ -196,6 +204,11 @@ static inline int pm_runtime_put_sync(struct device *dev) | |||
196 | return __pm_runtime_idle(dev, RPM_GET_PUT); | 204 | return __pm_runtime_idle(dev, RPM_GET_PUT); |
197 | } | 205 | } |
198 | 206 | ||
207 | static inline int pm_runtime_put_sync_suspend(struct device *dev) | ||
208 | { | ||
209 | return __pm_runtime_suspend(dev, RPM_GET_PUT); | ||
210 | } | ||
211 | |||
199 | static inline int pm_runtime_put_sync_autosuspend(struct device *dev) | 212 | static inline int pm_runtime_put_sync_autosuspend(struct device *dev) |
200 | { | 213 | { |
201 | return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_AUTO); | 214 | return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_AUTO); |
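
Illustration (not part of the merged diff): the synchronous "put" helpers differ only in what runs once the usage counter reaches zero. Schematically, in hypothetical driver code:

	pm_runtime_put_sync(dev);		/* counter hits 0 -> ->runtime_idle() path */
	pm_runtime_put_sync_suspend(dev);	/* counter hits 0 -> attempt ->runtime_suspend() directly */
	pm_runtime_put_sync_autosuspend(dev);	/* counter hits 0 -> suspend honouring the autosuspend delay */
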
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 26697514c5ec..144b34be5c32 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -292,7 +292,7 @@ extern int unregister_pm_notifier(struct notifier_block *nb); | |||
292 | /* drivers/base/power/wakeup.c */ | 292 | /* drivers/base/power/wakeup.c */ |
293 | extern bool events_check_enabled; | 293 | extern bool events_check_enabled; |
294 | 294 | ||
295 | extern bool pm_check_wakeup_events(void); | 295 | extern bool pm_wakeup_pending(void); |
296 | extern bool pm_get_wakeup_count(unsigned int *count); | 296 | extern bool pm_get_wakeup_count(unsigned int *count); |
297 | extern bool pm_save_wakeup_count(unsigned int count); | 297 | extern bool pm_save_wakeup_count(unsigned int count); |
298 | #else /* !CONFIG_PM_SLEEP */ | 298 | #else /* !CONFIG_PM_SLEEP */ |
@@ -309,7 +309,7 @@ static inline int unregister_pm_notifier(struct notifier_block *nb) | |||
309 | 309 | ||
310 | #define pm_notifier(fn, pri) do { (void)(fn); } while (0) | 310 | #define pm_notifier(fn, pri) do { (void)(fn); } while (0) |
311 | 311 | ||
312 | static inline bool pm_check_wakeup_events(void) { return true; } | 312 | static inline bool pm_wakeup_pending(void) { return false; } |
313 | #endif /* !CONFIG_PM_SLEEP */ | 313 | #endif /* !CONFIG_PM_SLEEP */ |
314 | 314 | ||
315 | extern struct mutex pm_mutex; | 315 | extern struct mutex pm_mutex; |
diff --git a/kernel/freezer.c b/kernel/freezer.c
index bd1d42b17cb2..66ecd2ead215 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -104,8 +104,13 @@ bool freeze_task(struct task_struct *p, bool sig_only) | |||
104 | } | 104 | } |
105 | 105 | ||
106 | if (should_send_signal(p)) { | 106 | if (should_send_signal(p)) { |
107 | if (!signal_pending(p)) | 107 | fake_signal_wake_up(p); |
108 | fake_signal_wake_up(p); | 108 | /* |
109 | * fake_signal_wake_up() goes through p's scheduler | ||
110 | * lock and guarantees that TASK_STOPPED/TRACED -> | ||
111 | * TASK_RUNNING transition can't race with task state | ||
112 | * testing in try_to_freeze_tasks(). | ||
113 | */ | ||
109 | } else if (sig_only) { | 114 | } else if (sig_only) { |
110 | return false; | 115 | return false; |
111 | } else { | 116 | } else { |
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index f9063c6b185d..b75597235d85 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -1,7 +1,4 @@ | |||
1 | 1 | ccflags-$(CONFIG_PM_DEBUG) := -DDEBUG | |
2 | ifeq ($(CONFIG_PM_DEBUG),y) | ||
3 | EXTRA_CFLAGS += -DDEBUG | ||
4 | endif | ||
5 | 2 | ||
6 | obj-$(CONFIG_PM) += main.o | 3 | obj-$(CONFIG_PM) += main.o |
7 | obj-$(CONFIG_PM_SLEEP) += console.o | 4 | obj-$(CONFIG_PM_SLEEP) += console.o |
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 048d0b514831..870f72bc72ae 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -62,7 +62,7 @@ void hibernation_set_ops(struct platform_hibernation_ops *ops) | |||
62 | { | 62 | { |
63 | if (ops && !(ops->begin && ops->end && ops->pre_snapshot | 63 | if (ops && !(ops->begin && ops->end && ops->pre_snapshot |
64 | && ops->prepare && ops->finish && ops->enter && ops->pre_restore | 64 | && ops->prepare && ops->finish && ops->enter && ops->pre_restore |
65 | && ops->restore_cleanup)) { | 65 | && ops->restore_cleanup && ops->leave)) { |
66 | WARN_ON(1); | 66 | WARN_ON(1); |
67 | return; | 67 | return; |
68 | } | 68 | } |
@@ -278,7 +278,7 @@ static int create_image(int platform_mode) | |||
278 | goto Enable_irqs; | 278 | goto Enable_irqs; |
279 | } | 279 | } |
280 | 280 | ||
281 | if (hibernation_test(TEST_CORE) || !pm_check_wakeup_events()) | 281 | if (hibernation_test(TEST_CORE) || pm_wakeup_pending()) |
282 | goto Power_up; | 282 | goto Power_up; |
283 | 283 | ||
284 | in_suspend = 1; | 284 | in_suspend = 1; |
@@ -516,7 +516,7 @@ int hibernation_platform_enter(void) | |||
516 | 516 | ||
517 | local_irq_disable(); | 517 | local_irq_disable(); |
518 | sysdev_suspend(PMSG_HIBERNATE); | 518 | sysdev_suspend(PMSG_HIBERNATE); |
519 | if (!pm_check_wakeup_events()) { | 519 | if (pm_wakeup_pending()) { |
520 | error = -EAGAIN; | 520 | error = -EAGAIN; |
521 | goto Power_up; | 521 | goto Power_up; |
522 | } | 522 | } |
@@ -647,6 +647,7 @@ int hibernate(void) | |||
647 | swsusp_free(); | 647 | swsusp_free(); |
648 | if (!error) | 648 | if (!error) |
649 | power_down(); | 649 | power_down(); |
650 | in_suspend = 0; | ||
650 | pm_restore_gfp_mask(); | 651 | pm_restore_gfp_mask(); |
651 | } else { | 652 | } else { |
652 | pr_debug("PM: Image restored successfully.\n"); | 653 | pr_debug("PM: Image restored successfully.\n"); |
diff --git a/kernel/power/process.c b/kernel/power/process.c
index e50b4c1b2a0f..d6d2a10320e0 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -64,6 +64,12 @@ static int try_to_freeze_tasks(bool sig_only) | |||
64 | * perturb a task in TASK_STOPPED or TASK_TRACED. | 64 | * perturb a task in TASK_STOPPED or TASK_TRACED. |
65 | * It is "frozen enough". If the task does wake | 65 | * It is "frozen enough". If the task does wake |
66 | * up, it will immediately call try_to_freeze. | 66 | * up, it will immediately call try_to_freeze. |
67 | * | ||
68 | * Because freeze_task() goes through p's | ||
69 | * scheduler lock after setting TIF_FREEZE, it's | ||
70 | * guaranteed that either we see TASK_RUNNING or | ||
71 | * try_to_stop() after schedule() in ptrace/signal | ||
72 | * stop sees TIF_FREEZE. | ||
67 | */ | 73 | */ |
68 | if (!task_is_stopped_or_traced(p) && | 74 | if (!task_is_stopped_or_traced(p) && |
69 | !freezer_should_skip(p)) | 75 | !freezer_should_skip(p)) |
@@ -79,7 +85,7 @@ static int try_to_freeze_tasks(bool sig_only) | |||
79 | if (!todo || time_after(jiffies, end_time)) | 85 | if (!todo || time_after(jiffies, end_time)) |
80 | break; | 86 | break; |
81 | 87 | ||
82 | if (!pm_check_wakeup_events()) { | 88 | if (pm_wakeup_pending()) { |
83 | wakeup = true; | 89 | wakeup = true; |
84 | break; | 90 | break; |
85 | } | 91 | } |
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 031d5e3a6197..8850df68794d 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -164,7 +164,7 @@ static int suspend_enter(suspend_state_t state) | |||
164 | 164 | ||
165 | error = sysdev_suspend(PMSG_SUSPEND); | 165 | error = sysdev_suspend(PMSG_SUSPEND); |
166 | if (!error) { | 166 | if (!error) { |
167 | if (!suspend_test(TEST_CORE) && pm_check_wakeup_events()) { | 167 | if (!(suspend_test(TEST_CORE) || pm_wakeup_pending())) { |
168 | error = suspend_ops->enter(state); | 168 | error = suspend_ops->enter(state); |
169 | events_check_enabled = false; | 169 | events_check_enabled = false; |
170 | } | 170 | } |