commit     643161ace2a7624fd0106ede12ae43bcbbfc1de0
tree       56fb5d4af5c5e46da8cfe3c613a84f1402a60d41
parent     743c5bc210f45b728a246da65fd1a3160566d34d
parent     37f08be11be9a7d9351fb1b9b408259519a126f3
author     Rafael J. Wysocki <rjw@sisk.pl>  2012-03-04 17:11:14 -0500
committer  Rafael J. Wysocki <rjw@sisk.pl>  2012-03-04 17:11:14 -0500
Merge branch 'pm-sleep'
* pm-sleep:
PM / Freezer: Remove references to TIF_FREEZE in comments
PM / Sleep: Add more wakeup source initialization routines
PM / Hibernate: Enable usermodehelpers in hibernate() error path
PM / Sleep: Make __pm_stay_awake() delete wakeup source timers
PM / Sleep: Fix race conditions related to wakeup source timer function
PM / Sleep: Fix possible infinite loop during wakeup source destruction
PM / Hibernate: print physical addresses consistently with other parts of kernel
PM: Add comment describing relationships between PM callbacks to pm.h
PM / Sleep: Drop suspend_stats_update()
PM / Sleep: Make enter_state() in kernel/power/suspend.c static
PM / Sleep: Unify kerneldoc comments in kernel/power/suspend.c
PM / Sleep: Remove unnecessary label from suspend_freeze_processes()
PM / Sleep: Do not check wakeup too often in try_to_freeze_tasks()
PM / Sleep: Initialize wakeup source locks in wakeup_source_add()
PM / Hibernate: Refactor and simplify freezer_test_done
PM / Hibernate: Thaw kernel threads in hibernation_snapshot() in error/test path
PM / Freezer / Docs: Document the beauty of freeze/thaw semantics
PM / Suspend: Avoid code duplication in suspend statistics update
PM / Sleep: Introduce generic callbacks for new device PM phases
PM / Sleep: Introduce "late suspend" and "early resume" of devices
 Documentation/power/devices.txt           |  93
 Documentation/power/freezing-of-tasks.txt |  21
 arch/x86/kernel/apm_32.c                  |  11
 drivers/base/power/generic_ops.c          | 157
 drivers/base/power/main.c                 | 247
 drivers/base/power/wakeup.c               |  85
 drivers/xen/manage.c                      |   6
 include/linux/pm.h                        |  58
 include/linux/pm_wakeup.h                 |  22
 include/linux/suspend.h                   |   4
 kernel/exit.c                             |   2
 kernel/freezer.c                          |   6
 kernel/kexec.c                            |   8
 kernel/power/hibernate.c                  |  47
 kernel/power/main.c                       |  20
 kernel/power/power.h                      |   7
 kernel/power/process.c                    |  24
 kernel/power/snapshot.c                   |   7
 kernel/power/suspend.c                    |  84
 kernel/power/user.c                       |  12
 20 files changed, 662 insertions(+), 259 deletions(-)
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
index 20af7def23c8..872815cd41d3 100644
--- a/Documentation/power/devices.txt
+++ b/Documentation/power/devices.txt
@@ -96,6 +96,12 @@ struct dev_pm_ops {
 	int (*thaw)(struct device *dev);
 	int (*poweroff)(struct device *dev);
 	int (*restore)(struct device *dev);
+	int (*suspend_late)(struct device *dev);
+	int (*resume_early)(struct device *dev);
+	int (*freeze_late)(struct device *dev);
+	int (*thaw_early)(struct device *dev);
+	int (*poweroff_late)(struct device *dev);
+	int (*restore_early)(struct device *dev);
 	int (*suspend_noirq)(struct device *dev);
 	int (*resume_noirq)(struct device *dev);
 	int (*freeze_noirq)(struct device *dev);
@@ -305,7 +311,7 @@ Entering System Suspend
 -----------------------
 When the system goes into the standby or memory sleep state, the phases are:
 
-	prepare, suspend, suspend_noirq.
+	prepare, suspend, suspend_late, suspend_noirq.
 
     1.	The prepare phase is meant to prevent races by preventing new devices
	from being registered; the PM core would never know that all the
@@ -324,7 +330,12 @@ When the system goes into the standby or memory sleep state, the phases are:
 	appropriate low-power state, depending on the bus type the device is on,
 	and they may enable wakeup events.
 
-    3.	The suspend_noirq phase occurs after IRQ handlers have been disabled,
+    3.	For a number of devices it is convenient to split suspend into the
+	"quiesce device" and "save device state" phases, in which cases
+	suspend_late is meant to do the latter.  It is always executed after
+	runtime power management has been disabled for all devices.
+
+    4.	The suspend_noirq phase occurs after IRQ handlers have been disabled,
 	which means that the driver's interrupt handler will not be called while
 	the callback method is running.  The methods should save the values of
 	the device's registers that weren't saved previously and finally put the
@@ -359,7 +370,7 @@ Leaving System Suspend
 ----------------------
 When resuming from standby or memory sleep, the phases are:
 
-	resume_noirq, resume, complete.
+	resume_noirq, resume_early, resume, complete.
 
     1.	The resume_noirq callback methods should perform any actions needed
	before the driver's interrupt handlers are invoked.  This generally
@@ -375,14 +386,18 @@ When resuming from standby or memory sleep, the phases are:
 	device driver's ->pm.resume_noirq() method to perform device-specific
 	actions.
 
-    2.	The resume methods should bring the device back to its operating
+    2.	The resume_early methods should prepare devices for the execution of
+	the resume methods.  This generally involves undoing the actions of the
+	preceding suspend_late phase.
+
+    3.	The resume methods should bring the device back to its operating
 	state, so that it can perform normal I/O.  This generally involves
 	undoing the actions of the suspend phase.
 
-    3.	The complete phase uses only a bus callback.  The method should undo the
-	actions of the prepare phase.  Note, however, that new children may be
-	registered below the device as soon as the resume callbacks occur; it's
-	not necessary to wait until the complete phase.
+    4.	The complete phase should undo the actions of the prepare phase.  Note,
+	however, that new children may be registered below the device as soon as
+	the resume callbacks occur; it's not necessary to wait until the
+	complete phase.
 
 At the end of these phases, drivers should be as functional as they were before
 suspending: I/O can be performed using DMA and IRQs, and the relevant clocks are
@@ -429,8 +444,8 @@ an image of the system memory while everything is stable, reactivate all
 devices (thaw), write the image to permanent storage, and finally shut down the
 system (poweroff).  The phases used to accomplish this are:
 
-	prepare, freeze, freeze_noirq, thaw_noirq, thaw, complete,
-	prepare, poweroff, poweroff_noirq
+	prepare, freeze, freeze_late, freeze_noirq, thaw_noirq, thaw_early,
+	thaw, complete, prepare, poweroff, poweroff_late, poweroff_noirq
 
     1.	The prepare phase is discussed in the "Entering System Suspend" section
 	above.
@@ -441,7 +456,11 @@ system (poweroff).  The phases used to accomplish this are:
 	save time it's best not to do so.  Also, the device should not be
 	prepared to generate wakeup events.
 
-    3.	The freeze_noirq phase is analogous to the suspend_noirq phase discussed
+    3.	The freeze_late phase is analogous to the suspend_late phase described
+	above, except that the device should not be put in a low-power state and
+	should not be allowed to generate wakeup events by it.
+
+    4.	The freeze_noirq phase is analogous to the suspend_noirq phase discussed
 	above, except again that the device should not be put in a low-power
 	state and should not be allowed to generate wakeup events.
 
@@ -449,15 +468,19 @@ At this point the system image is created.  All devices should be inactive and
 the contents of memory should remain undisturbed while this happens, so that the
 image forms an atomic snapshot of the system state.
 
-    4.	The thaw_noirq phase is analogous to the resume_noirq phase discussed
+    5.	The thaw_noirq phase is analogous to the resume_noirq phase discussed
 	above.  The main difference is that its methods can assume the device is
 	in the same state as at the end of the freeze_noirq phase.
 
-    5.	The thaw phase is analogous to the resume phase discussed above.  Its
+    6.	The thaw_early phase is analogous to the resume_early phase described
+	above.  Its methods should undo the actions of the preceding
+	freeze_late, if necessary.
+
+    7.	The thaw phase is analogous to the resume phase discussed above.  Its
 	methods should bring the device back to an operating state, so that it
 	can be used for saving the image if necessary.
 
-    6.	The complete phase is discussed in the "Leaving System Suspend" section
+    8.	The complete phase is discussed in the "Leaving System Suspend" section
 	above.
 
 At this point the system image is saved, and the devices then need to be
@@ -465,16 +488,19 @@ prepared for the upcoming system shutdown.  This is much like suspending them
 before putting the system into the standby or memory sleep state, and the phases
 are similar.
 
-    7.	The prepare phase is discussed above.
+    9.	The prepare phase is discussed above.
+
+    10.	The poweroff phase is analogous to the suspend phase.
 
-    8.	The poweroff phase is analogous to the suspend phase.
+    11.	The poweroff_late phase is analogous to the suspend_late phase.
 
-    9.	The poweroff_noirq phase is analogous to the suspend_noirq phase.
+    12.	The poweroff_noirq phase is analogous to the suspend_noirq phase.
 
-The poweroff and poweroff_noirq callbacks should do essentially the same things
-as the suspend and suspend_noirq callbacks.  The only notable difference is that
-they need not store the device register values, because the registers should
-already have been stored during the freeze or freeze_noirq phases.
+The poweroff, poweroff_late and poweroff_noirq callbacks should do essentially
+the same things as the suspend, suspend_late and suspend_noirq callbacks,
+respectively.  The only notable difference is that they need not store the
+device register values, because the registers should already have been stored
+during the freeze, freeze_late or freeze_noirq phases.
 
 
 Leaving Hibernation
@@ -518,22 +544,25 @@ To achieve this, the image kernel must restore the devices' pre-hibernation
 functionality.  The operation is much like waking up from the memory sleep
 state, although it involves different phases:
 
-	restore_noirq, restore, complete
+	restore_noirq, restore_early, restore, complete
 
     1.	The restore_noirq phase is analogous to the resume_noirq phase.
 
-    2.	The restore phase is analogous to the resume phase.
+    2.	The restore_early phase is analogous to the resume_early phase.
+
+    3.	The restore phase is analogous to the resume phase.
 
-    3.	The complete phase is discussed above.
+    4.	The complete phase is discussed above.
 
-The main difference from resume[_noirq] is that restore[_noirq] must assume the
-device has been accessed and reconfigured by the boot loader or the boot kernel.
-Consequently the state of the device may be different from the state remembered
-from the freeze and freeze_noirq phases.  The device may even need to be reset
-and completely re-initialized.  In many cases this difference doesn't matter, so
-the resume[_noirq] and restore[_noirq] method pointers can be set to the same
-routines.  Nevertheless, different callback pointers are used in case there is a
-situation where it actually matters.
+The main difference from resume[_early|_noirq] is that restore[_early|_noirq]
+must assume the device has been accessed and reconfigured by the boot loader or
+the boot kernel.  Consequently the state of the device may be different from the
+state remembered from the freeze, freeze_late and freeze_noirq phases.  The
+device may even need to be reset and completely re-initialized.  In many cases
+this difference doesn't matter, so the resume[_early|_noirq] and
+restore[_early|_noirq] method pointers can be set to the same routines.
+Nevertheless, different callback pointers are used in case there is a situation
+where it actually does matter.
 
 
 Device Power Management Domains
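
To make the new phases concrete: a driver that wants to split "quiesce device"
from "save device state" would populate the new dev_pm_ops fields roughly as in
the sketch below.  This is illustrative only; the foo_* callbacks are
hypothetical and not part of this merge.

	static const struct dev_pm_ops foo_pm_ops = {
		.suspend      = foo_suspend,       /* quiesce the device */
		.suspend_late = foo_suspend_late,  /* save device state */
		.resume_early = foo_resume_early,  /* restore device state */
		.resume       = foo_resume,        /* resume normal I/O */
	};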
diff --git a/Documentation/power/freezing-of-tasks.txt b/Documentation/power/freezing-of-tasks.txt
index ebd7490ef1df..ec715cd78fbb 100644
--- a/Documentation/power/freezing-of-tasks.txt
+++ b/Documentation/power/freezing-of-tasks.txt
@@ -63,6 +63,27 @@ devices have been reinitialized, the function thaw_processes() is called in
 order to clear the PF_FROZEN flag for each frozen task.  Then, the tasks that
 have been frozen leave __refrigerator() and continue running.
 
+
+Rationale behind the functions dealing with freezing and thawing of tasks:
+-------------------------------------------------------------------------
+
+freeze_processes():
+  - freezes only userspace tasks
+
+freeze_kernel_threads():
+  - freezes all tasks (including kernel threads) because we can't freeze
+    kernel threads without freezing userspace tasks
+
+thaw_kernel_threads():
+  - thaws only kernel threads; this is particularly useful if we need to do
+    anything special in between thawing of kernel threads and thawing of
+    userspace tasks, or if we want to postpone the thawing of userspace tasks
+
+thaw_processes():
+  - thaws all tasks (including kernel threads) because we can't thaw userspace
+    tasks without thawing kernel threads
+
+
 III. Which kernel threads are freezable?
 
 Kernel threads are not freezable by default.  However, a kernel thread may clear
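
As a sketch of the call pairing this rationale implies (a hypothetical caller
with simplified error handling; the real sequences live in kernel/power/):

	int error = freeze_processes();          /* userspace tasks only */
	if (!error) {
		error = freeze_kernel_threads(); /* now everything is frozen */
		if (error)
			thaw_processes();        /* unwinds the userspace freeze too */
	}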
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index f76623cbe263..5d56931a15b3 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -1234,8 +1234,7 @@ static int suspend(int vetoable)
 	struct apm_user *as;
 
 	dpm_suspend_start(PMSG_SUSPEND);
-
-	dpm_suspend_noirq(PMSG_SUSPEND);
+	dpm_suspend_end(PMSG_SUSPEND);
 
 	local_irq_disable();
 	syscore_suspend();
@@ -1259,9 +1258,9 @@
 	syscore_resume();
 	local_irq_enable();
 
-	dpm_resume_noirq(PMSG_RESUME);
-
+	dpm_resume_start(PMSG_RESUME);
 	dpm_resume_end(PMSG_RESUME);
+
 	queue_event(APM_NORMAL_RESUME, NULL);
 	spin_lock(&user_list_lock);
 	for (as = user_list; as != NULL; as = as->next) {
@@ -1277,7 +1276,7 @@ static void standby(void)
 {
 	int err;
 
-	dpm_suspend_noirq(PMSG_SUSPEND);
+	dpm_suspend_end(PMSG_SUSPEND);
 
 	local_irq_disable();
 	syscore_suspend();
@@ -1291,7 +1290,7 @@ static void standby(void)
 	syscore_resume();
 	local_irq_enable();
 
-	dpm_resume_noirq(PMSG_RESUME);
+	dpm_resume_start(PMSG_RESUME);
 }
 
 static apm_event_t get_event(void)
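
Each of the helpers used here covers two of the phases described in devices.txt
above, so a platform suspend path pairs them as in this sketch (illustrative,
not code from the patch):

	dpm_suspend_start(PMSG_SUSPEND);  /* "prepare" + "suspend" */
	dpm_suspend_end(PMSG_SUSPEND);    /* "suspend_late" + "suspend_noirq" */
	/* ... the system sleeps here ... */
	dpm_resume_start(PMSG_RESUME);    /* "resume_noirq" + "resume_early" */
	dpm_resume_end(PMSG_RESUME);      /* "resume" + "complete" */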
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 10bdd793f0bd..d03d290f31c2 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -92,59 +92,28 @@ int pm_generic_prepare(struct device *dev)
 }
 
 /**
- * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback.
- * @dev: Device to handle.
- * @event: PM transition of the system under way.
- * @bool: Whether or not this is the "noirq" stage.
- *
- * Execute the PM callback corresponding to @event provided by the driver of
- * @dev, if defined, and return its error code.  Return 0 if the callback is
- * not present.
+ * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems.
+ * @dev: Device to suspend.
  */
-static int __pm_generic_call(struct device *dev, int event, bool noirq)
+int pm_generic_suspend_noirq(struct device *dev)
 {
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-	int (*callback)(struct device *);
-
-	if (!pm)
-		return 0;
-
-	switch (event) {
-	case PM_EVENT_SUSPEND:
-		callback = noirq ? pm->suspend_noirq : pm->suspend;
-		break;
-	case PM_EVENT_FREEZE:
-		callback = noirq ? pm->freeze_noirq : pm->freeze;
-		break;
-	case PM_EVENT_HIBERNATE:
-		callback = noirq ? pm->poweroff_noirq : pm->poweroff;
-		break;
-	case PM_EVENT_RESUME:
-		callback = noirq ? pm->resume_noirq : pm->resume;
-		break;
-	case PM_EVENT_THAW:
-		callback = noirq ? pm->thaw_noirq : pm->thaw;
-		break;
-	case PM_EVENT_RESTORE:
-		callback = noirq ? pm->restore_noirq : pm->restore;
-		break;
-	default:
-		callback = NULL;
-		break;
-	}
 
-	return callback ? callback(dev) : 0;
+	return pm && pm->suspend_noirq ? pm->suspend_noirq(dev) : 0;
 }
+EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
 
 /**
- * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems.
+ * pm_generic_suspend_late - Generic suspend_late callback for subsystems.
  * @dev: Device to suspend.
  */
-int pm_generic_suspend_noirq(struct device *dev)
+int pm_generic_suspend_late(struct device *dev)
 {
-	return __pm_generic_call(dev, PM_EVENT_SUSPEND, true);
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->suspend_late ? pm->suspend_late(dev) : 0;
 }
-EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
+EXPORT_SYMBOL_GPL(pm_generic_suspend_late);
 
 /**
  * pm_generic_suspend - Generic suspend callback for subsystems.
@@ -152,7 +121,9 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
  */
 int pm_generic_suspend(struct device *dev)
 {
-	return __pm_generic_call(dev, PM_EVENT_SUSPEND, false);
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->suspend ? pm->suspend(dev) : 0;
 }
 EXPORT_SYMBOL_GPL(pm_generic_suspend);
 
@@ -162,17 +133,33 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend);
  */
 int pm_generic_freeze_noirq(struct device *dev)
 {
-	return __pm_generic_call(dev, PM_EVENT_FREEZE, true);
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->freeze_noirq ? pm->freeze_noirq(dev) : 0;
 }
 EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
 
 /**
+ * pm_generic_freeze_late - Generic freeze_late callback for subsystems.
+ * @dev: Device to freeze.
+ */
+int pm_generic_freeze_late(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->freeze_late ? pm->freeze_late(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_freeze_late);
+
+/**
  * pm_generic_freeze - Generic freeze callback for subsystems.
  * @dev: Device to freeze.
  */
 int pm_generic_freeze(struct device *dev)
 {
-	return __pm_generic_call(dev, PM_EVENT_FREEZE, false);
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->freeze ? pm->freeze(dev) : 0;
 }
 EXPORT_SYMBOL_GPL(pm_generic_freeze);
 
@@ -182,17 +169,33 @@ EXPORT_SYMBOL_GPL(pm_generic_freeze);
  */
 int pm_generic_poweroff_noirq(struct device *dev)
 {
-	return __pm_generic_call(dev, PM_EVENT_HIBERNATE, true);
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->poweroff_noirq ? pm->poweroff_noirq(dev) : 0;
 }
 EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);
 
 /**
+ * pm_generic_poweroff_late - Generic poweroff_late callback for subsystems.
+ * @dev: Device to handle.
+ */
+int pm_generic_poweroff_late(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->poweroff_late ? pm->poweroff_late(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_poweroff_late);
+
+/**
  * pm_generic_poweroff - Generic poweroff callback for subsystems.
  * @dev: Device to handle.
  */
 int pm_generic_poweroff(struct device *dev)
 {
-	return __pm_generic_call(dev, PM_EVENT_HIBERNATE, false);
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->poweroff ? pm->poweroff(dev) : 0;
 }
 EXPORT_SYMBOL_GPL(pm_generic_poweroff);
 
@@ -202,17 +205,33 @@ EXPORT_SYMBOL_GPL(pm_generic_poweroff);
  */
 int pm_generic_thaw_noirq(struct device *dev)
 {
-	return __pm_generic_call(dev, PM_EVENT_THAW, true);
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->thaw_noirq ? pm->thaw_noirq(dev) : 0;
 }
 EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
 
 /**
+ * pm_generic_thaw_early - Generic thaw_early callback for subsystems.
+ * @dev: Device to thaw.
+ */
+int pm_generic_thaw_early(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->thaw_early ? pm->thaw_early(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_thaw_early);
+
+/**
  * pm_generic_thaw - Generic thaw callback for subsystems.
  * @dev: Device to thaw.
  */
 int pm_generic_thaw(struct device *dev)
 {
-	return __pm_generic_call(dev, PM_EVENT_THAW, false);
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->thaw ? pm->thaw(dev) : 0;
 }
 EXPORT_SYMBOL_GPL(pm_generic_thaw);
 
@@ -222,17 +241,33 @@ EXPORT_SYMBOL_GPL(pm_generic_thaw);
  */
 int pm_generic_resume_noirq(struct device *dev)
 {
-	return __pm_generic_call(dev, PM_EVENT_RESUME, true);
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->resume_noirq ? pm->resume_noirq(dev) : 0;
 }
 EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
 
 /**
+ * pm_generic_resume_early - Generic resume_early callback for subsystems.
+ * @dev: Device to resume.
+ */
+int pm_generic_resume_early(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->resume_early ? pm->resume_early(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_resume_early);
+
+/**
  * pm_generic_resume - Generic resume callback for subsystems.
  * @dev: Device to resume.
  */
 int pm_generic_resume(struct device *dev)
 {
-	return __pm_generic_call(dev, PM_EVENT_RESUME, false);
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->resume ? pm->resume(dev) : 0;
 }
 EXPORT_SYMBOL_GPL(pm_generic_resume);
 
@@ -242,17 +277,33 @@ EXPORT_SYMBOL_GPL(pm_generic_resume);
  */
 int pm_generic_restore_noirq(struct device *dev)
 {
-	return __pm_generic_call(dev, PM_EVENT_RESTORE, true);
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->restore_noirq ? pm->restore_noirq(dev) : 0;
 }
 EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
 
 /**
+ * pm_generic_restore_early - Generic restore_early callback for subsystems.
+ * @dev: Device to resume.
+ */
+int pm_generic_restore_early(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->restore_early ? pm->restore_early(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_restore_early);
+
+/**
  * pm_generic_restore - Generic restore callback for subsystems.
  * @dev: Device to restore.
  */
 int pm_generic_restore(struct device *dev)
 {
-	return __pm_generic_call(dev, PM_EVENT_RESTORE, false);
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->restore ? pm->restore(dev) : 0;
 }
 EXPORT_SYMBOL_GPL(pm_generic_restore);
 
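
A subsystem that forwards every phase straight to the driver can now point its
dev_pm_ops at these helpers; the foo_bus name below is illustrative only:

	static const struct dev_pm_ops foo_bus_pm_ops = {
		.suspend       = pm_generic_suspend,
		.suspend_late  = pm_generic_suspend_late,
		.suspend_noirq = pm_generic_suspend_noirq,
		.resume_noirq  = pm_generic_resume_noirq,
		.resume_early  = pm_generic_resume_early,
		.resume        = pm_generic_resume,
	};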
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index e2cc3d2e0ecc..b462c0e341cb 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -47,6 +47,7 @@ typedef int (*pm_callback_t)(struct device *);
 LIST_HEAD(dpm_list);
 LIST_HEAD(dpm_prepared_list);
 LIST_HEAD(dpm_suspended_list);
+LIST_HEAD(dpm_late_early_list);
 LIST_HEAD(dpm_noirq_list);
 
 struct suspend_stats suspend_stats;
@@ -246,6 +247,40 @@ static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
 }
 
 /**
+ * pm_late_early_op - Return the PM operation appropriate for given PM event.
+ * @ops: PM operations to choose from.
+ * @state: PM transition of the system being carried out.
+ *
+ * Runtime PM is disabled for @dev while this function is being executed.
+ */
+static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
+				      pm_message_t state)
+{
+	switch (state.event) {
+#ifdef CONFIG_SUSPEND
+	case PM_EVENT_SUSPEND:
+		return ops->suspend_late;
+	case PM_EVENT_RESUME:
+		return ops->resume_early;
+#endif /* CONFIG_SUSPEND */
+#ifdef CONFIG_HIBERNATE_CALLBACKS
+	case PM_EVENT_FREEZE:
+	case PM_EVENT_QUIESCE:
+		return ops->freeze_late;
+	case PM_EVENT_HIBERNATE:
+		return ops->poweroff_late;
+	case PM_EVENT_THAW:
+	case PM_EVENT_RECOVER:
+		return ops->thaw_early;
+	case PM_EVENT_RESTORE:
+		return ops->restore_early;
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
+	}
+
+	return NULL;
+}
+
+/**
  * pm_noirq_op - Return the PM operation appropriate for given PM event.
  * @ops: PM operations to choose from.
  * @state: PM transition of the system being carried out.
@@ -374,21 +409,21 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
 	TRACE_RESUME(0);
 
 	if (dev->pm_domain) {
-		info = "EARLY power domain ";
+		info = "noirq power domain ";
 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
 	} else if (dev->type && dev->type->pm) {
-		info = "EARLY type ";
+		info = "noirq type ";
 		callback = pm_noirq_op(dev->type->pm, state);
 	} else if (dev->class && dev->class->pm) {
-		info = "EARLY class ";
+		info = "noirq class ";
 		callback = pm_noirq_op(dev->class->pm, state);
 	} else if (dev->bus && dev->bus->pm) {
-		info = "EARLY bus ";
+		info = "noirq bus ";
 		callback = pm_noirq_op(dev->bus->pm, state);
 	}
 
 	if (!callback && dev->driver && dev->driver->pm) {
-		info = "EARLY driver ";
+		info = "noirq driver ";
 		callback = pm_noirq_op(dev->driver->pm, state);
 	}
 
@@ -399,13 +434,13 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
 }
 
 /**
- * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
+ * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
  * @state: PM transition of the system being carried out.
  *
- * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
+ * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
  * enable device drivers to receive interrupts.
  */
-void dpm_resume_noirq(pm_message_t state)
+static void dpm_resume_noirq(pm_message_t state)
 {
 	ktime_t starttime = ktime_get();
 
@@ -415,7 +450,7 @@ void dpm_resume_noirq(pm_message_t state)
 		int error;
 
 		get_device(dev);
-		list_move_tail(&dev->power.entry, &dpm_suspended_list);
+		list_move_tail(&dev->power.entry, &dpm_late_early_list);
 		mutex_unlock(&dpm_list_mtx);
 
 		error = device_resume_noirq(dev, state);
@@ -423,6 +458,80 @@ void dpm_resume_noirq(pm_message_t state)
 			suspend_stats.failed_resume_noirq++;
 			dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
 			dpm_save_failed_dev(dev_name(dev));
+			pm_dev_err(dev, state, " noirq", error);
+		}
+
+		mutex_lock(&dpm_list_mtx);
+		put_device(dev);
+	}
+	mutex_unlock(&dpm_list_mtx);
+	dpm_show_time(starttime, state, "noirq");
+	resume_device_irqs();
+}
+
+/**
+ * device_resume_early - Execute an "early resume" callback for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ *
+ * Runtime PM is disabled for @dev while this function is being executed.
+ */
+static int device_resume_early(struct device *dev, pm_message_t state)
+{
+	pm_callback_t callback = NULL;
+	char *info = NULL;
+	int error = 0;
+
+	TRACE_DEVICE(dev);
+	TRACE_RESUME(0);
+
+	if (dev->pm_domain) {
+		info = "early power domain ";
+		callback = pm_late_early_op(&dev->pm_domain->ops, state);
+	} else if (dev->type && dev->type->pm) {
+		info = "early type ";
+		callback = pm_late_early_op(dev->type->pm, state);
+	} else if (dev->class && dev->class->pm) {
+		info = "early class ";
+		callback = pm_late_early_op(dev->class->pm, state);
+	} else if (dev->bus && dev->bus->pm) {
+		info = "early bus ";
+		callback = pm_late_early_op(dev->bus->pm, state);
+	}
+
+	if (!callback && dev->driver && dev->driver->pm) {
+		info = "early driver ";
+		callback = pm_late_early_op(dev->driver->pm, state);
+	}
+
+	error = dpm_run_callback(callback, dev, state, info);
+
+	TRACE_RESUME(error);
+	return error;
+}
+
+/**
+ * dpm_resume_early - Execute "early resume" callbacks for all devices.
+ * @state: PM transition of the system being carried out.
+ */
+static void dpm_resume_early(pm_message_t state)
+{
+	ktime_t starttime = ktime_get();
+
+	mutex_lock(&dpm_list_mtx);
+	while (!list_empty(&dpm_late_early_list)) {
+		struct device *dev = to_device(dpm_late_early_list.next);
+		int error;
+
+		get_device(dev);
+		list_move_tail(&dev->power.entry, &dpm_suspended_list);
+		mutex_unlock(&dpm_list_mtx);
+
+		error = device_resume_early(dev, state);
+		if (error) {
+			suspend_stats.failed_resume_early++;
+			dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+			dpm_save_failed_dev(dev_name(dev));
 			pm_dev_err(dev, state, " early", error);
 		}
 
@@ -431,9 +540,18 @@ void dpm_resume_noirq(pm_message_t state)
 	}
 	mutex_unlock(&dpm_list_mtx);
 	dpm_show_time(starttime, state, "early");
-	resume_device_irqs();
 }
-EXPORT_SYMBOL_GPL(dpm_resume_noirq);
+
+/**
+ * dpm_resume_start - Execute "noirq" and "early" device callbacks.
+ * @state: PM transition of the system being carried out.
+ */
+void dpm_resume_start(pm_message_t state)
+{
+	dpm_resume_noirq(state);
+	dpm_resume_early(state);
+}
+EXPORT_SYMBOL_GPL(dpm_resume_start);
 
 /**
  * device_resume - Execute "resume" callbacks for given device.
@@ -716,21 +834,21 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
 	char *info = NULL;
 
 	if (dev->pm_domain) {
-		info = "LATE power domain ";
+		info = "noirq power domain ";
 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
 	} else if (dev->type && dev->type->pm) {
-		info = "LATE type ";
+		info = "noirq type ";
 		callback = pm_noirq_op(dev->type->pm, state);
 	} else if (dev->class && dev->class->pm) {
-		info = "LATE class ";
+		info = "noirq class ";
 		callback = pm_noirq_op(dev->class->pm, state);
 	} else if (dev->bus && dev->bus->pm) {
-		info = "LATE bus ";
+		info = "noirq bus ";
 		callback = pm_noirq_op(dev->bus->pm, state);
 	}
 
 	if (!callback && dev->driver && dev->driver->pm) {
-		info = "LATE driver ";
+		info = "noirq driver ";
 		callback = pm_noirq_op(dev->driver->pm, state);
 	}
 
@@ -738,21 +856,21 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
 }
 
 /**
- * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
+ * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
  * @state: PM transition of the system being carried out.
  *
  * Prevent device drivers from receiving interrupts and call the "noirq" suspend
  * handlers for all non-sysdev devices.
  */
-int dpm_suspend_noirq(pm_message_t state)
+static int dpm_suspend_noirq(pm_message_t state)
 {
 	ktime_t starttime = ktime_get();
 	int error = 0;
 
 	suspend_device_irqs();
 	mutex_lock(&dpm_list_mtx);
-	while (!list_empty(&dpm_suspended_list)) {
-		struct device *dev = to_device(dpm_suspended_list.prev);
+	while (!list_empty(&dpm_late_early_list)) {
+		struct device *dev = to_device(dpm_late_early_list.prev);
 
 		get_device(dev);
 		mutex_unlock(&dpm_list_mtx);
@@ -761,7 +879,7 @@ int dpm_suspend_noirq(pm_message_t state)
 
 		mutex_lock(&dpm_list_mtx);
 		if (error) {
-			pm_dev_err(dev, state, " late", error);
+			pm_dev_err(dev, state, " noirq", error);
 			suspend_stats.failed_suspend_noirq++;
 			dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
 			dpm_save_failed_dev(dev_name(dev));
@@ -776,10 +894,95 @@ int dpm_suspend_noirq(pm_message_t state)
 	if (error)
 		dpm_resume_noirq(resume_event(state));
 	else
+		dpm_show_time(starttime, state, "noirq");
+	return error;
+}
+
+/**
+ * device_suspend_late - Execute a "late suspend" callback for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ *
+ * Runtime PM is disabled for @dev while this function is being executed.
+ */
+static int device_suspend_late(struct device *dev, pm_message_t state)
+{
+	pm_callback_t callback = NULL;
+	char *info = NULL;
+
+	if (dev->pm_domain) {
+		info = "late power domain ";
+		callback = pm_late_early_op(&dev->pm_domain->ops, state);
+	} else if (dev->type && dev->type->pm) {
+		info = "late type ";
+		callback = pm_late_early_op(dev->type->pm, state);
+	} else if (dev->class && dev->class->pm) {
+		info = "late class ";
+		callback = pm_late_early_op(dev->class->pm, state);
+	} else if (dev->bus && dev->bus->pm) {
+		info = "late bus ";
+		callback = pm_late_early_op(dev->bus->pm, state);
+	}
+
+	if (!callback && dev->driver && dev->driver->pm) {
+		info = "late driver ";
+		callback = pm_late_early_op(dev->driver->pm, state);
+	}
+
+	return dpm_run_callback(callback, dev, state, info);
+}
+
+/**
+ * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
+ * @state: PM transition of the system being carried out.
+ */
+static int dpm_suspend_late(pm_message_t state)
+{
+	ktime_t starttime = ktime_get();
+	int error = 0;
+
+	mutex_lock(&dpm_list_mtx);
+	while (!list_empty(&dpm_suspended_list)) {
+		struct device *dev = to_device(dpm_suspended_list.prev);
+
+		get_device(dev);
+		mutex_unlock(&dpm_list_mtx);
+
+		error = device_suspend_late(dev, state);
+
+		mutex_lock(&dpm_list_mtx);
+		if (error) {
+			pm_dev_err(dev, state, " late", error);
+			suspend_stats.failed_suspend_late++;
+			dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
+			dpm_save_failed_dev(dev_name(dev));
+			put_device(dev);
+			break;
+		}
+		if (!list_empty(&dev->power.entry))
+			list_move(&dev->power.entry, &dpm_late_early_list);
+		put_device(dev);
+	}
+	mutex_unlock(&dpm_list_mtx);
+	if (error)
+		dpm_resume_early(resume_event(state));
+	else
 		dpm_show_time(starttime, state, "late");
+
 	return error;
 }
-EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
+
+/**
+ * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
+ * @state: PM transition of the system being carried out.
+ */
+int dpm_suspend_end(pm_message_t state)
+{
+	int error = dpm_suspend_late(state);
+
+	return error ? : dpm_suspend_noirq(state);
+}
+EXPORT_SYMBOL_GPL(dpm_suspend_end);
 
 /**
  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
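
Note the error convention: dpm_suspend_late() unwinds itself with
dpm_resume_early() on failure, so a caller of dpm_suspend_end() only has to
undo the earlier "prepare"/"suspend" phases.  A hypothetical caller might look
like:

	int error = dpm_suspend_end(PMSG_SUSPEND);
	if (error) {
		/* the "late"/"noirq" work was already rolled back internally */
		dpm_resume_end(PMSG_RESUME);
		return error;
	}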
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index caf995fb774b..2a3e581b8dcd 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -53,6 +53,23 @@ static void pm_wakeup_timer_fn(unsigned long data);
 static LIST_HEAD(wakeup_sources);
 
 /**
+ * wakeup_source_prepare - Prepare a new wakeup source for initialization.
+ * @ws: Wakeup source to prepare.
+ * @name: Pointer to the name of the new wakeup source.
+ *
+ * Callers must ensure that the @name string won't be freed when @ws is still in
+ * use.
+ */
+void wakeup_source_prepare(struct wakeup_source *ws, const char *name)
+{
+	if (ws) {
+		memset(ws, 0, sizeof(*ws));
+		ws->name = name;
+	}
+}
+EXPORT_SYMBOL_GPL(wakeup_source_prepare);
+
+/**
  * wakeup_source_create - Create a struct wakeup_source object.
  * @name: Name of the new wakeup source.
  */
@@ -60,37 +77,44 @@ struct wakeup_source *wakeup_source_create(const char *name)
 {
 	struct wakeup_source *ws;
 
-	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
+	ws = kmalloc(sizeof(*ws), GFP_KERNEL);
 	if (!ws)
 		return NULL;
 
-	spin_lock_init(&ws->lock);
-	if (name)
-		ws->name = kstrdup(name, GFP_KERNEL);
-
+	wakeup_source_prepare(ws, name ? kstrdup(name, GFP_KERNEL) : NULL);
 	return ws;
 }
 EXPORT_SYMBOL_GPL(wakeup_source_create);
 
 /**
+ * wakeup_source_drop - Prepare a struct wakeup_source object for destruction.
+ * @ws: Wakeup source to prepare for destruction.
+ *
+ * Callers must ensure that __pm_stay_awake() or __pm_wakeup_event() will never
+ * be run in parallel with this function for the same wakeup source object.
+ */
+void wakeup_source_drop(struct wakeup_source *ws)
+{
+	if (!ws)
+		return;
+
+	del_timer_sync(&ws->timer);
+	__pm_relax(ws);
+}
+EXPORT_SYMBOL_GPL(wakeup_source_drop);
+
+/**
  * wakeup_source_destroy - Destroy a struct wakeup_source object.
  * @ws: Wakeup source to destroy.
+ *
+ * Use only for wakeup source objects created with wakeup_source_create().
  */
 void wakeup_source_destroy(struct wakeup_source *ws)
 {
 	if (!ws)
 		return;
 
-	spin_lock_irq(&ws->lock);
-	while (ws->active) {
-		spin_unlock_irq(&ws->lock);
-
-		schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT));
-
-		spin_lock_irq(&ws->lock);
-	}
-	spin_unlock_irq(&ws->lock);
-
+	wakeup_source_drop(ws);
 	kfree(ws->name);
 	kfree(ws);
 }
@@ -105,6 +129,7 @@ void wakeup_source_add(struct wakeup_source *ws)
 	if (WARN_ON(!ws))
 		return;
 
+	spin_lock_init(&ws->lock);
 	setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws);
 	ws->active = false;
 
@@ -152,8 +177,10 @@ EXPORT_SYMBOL_GPL(wakeup_source_register);
  */
 void wakeup_source_unregister(struct wakeup_source *ws)
 {
-	wakeup_source_remove(ws);
-	wakeup_source_destroy(ws);
+	if (ws) {
+		wakeup_source_remove(ws);
+		wakeup_source_destroy(ws);
+	}
 }
 EXPORT_SYMBOL_GPL(wakeup_source_unregister);
 
@@ -349,7 +376,6 @@ static void wakeup_source_activate(struct wakeup_source *ws)
 {
 	ws->active = true;
 	ws->active_count++;
-	ws->timer_expires = jiffies;
 	ws->last_time = ktime_get();
 
 	/* Increment the counter of events in progress. */
@@ -370,9 +396,14 @@ void __pm_stay_awake(struct wakeup_source *ws)
 		return;
 
 	spin_lock_irqsave(&ws->lock, flags);
+
 	ws->event_count++;
 	if (!ws->active)
 		wakeup_source_activate(ws);
+
+	del_timer(&ws->timer);
+	ws->timer_expires = 0;
+
 	spin_unlock_irqrestore(&ws->lock, flags);
 }
 EXPORT_SYMBOL_GPL(__pm_stay_awake);
@@ -438,6 +469,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
 		ws->max_time = duration;
 
 	del_timer(&ws->timer);
+	ws->timer_expires = 0;
 
 	/*
 	 * Increment the counter of registered wakeup events and decrement the
@@ -492,11 +524,22 @@ EXPORT_SYMBOL_GPL(pm_relax);
  * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
  * @data: Address of the wakeup source object associated with the event source.
  *
- * Call __pm_relax() for the wakeup source whose address is stored in @data.
+ * Call wakeup_source_deactivate() for the wakeup source whose address is stored
+ * in @data if it is currently active and its timer has not been canceled and
+ * the expiration time of the timer is not in future.
  */
 static void pm_wakeup_timer_fn(unsigned long data)
 {
-	__pm_relax((struct wakeup_source *)data);
+	struct wakeup_source *ws = (struct wakeup_source *)data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ws->lock, flags);
+
+	if (ws->active && ws->timer_expires
+	    && time_after_eq(jiffies, ws->timer_expires))
+		wakeup_source_deactivate(ws);
+
+	spin_unlock_irqrestore(&ws->lock, flags);
 }
 
 /**
@@ -534,7 +577,7 @@ void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) | |||
534 | if (!expires) | 577 | if (!expires) |
535 | expires = 1; | 578 | expires = 1; |
536 | 579 | ||
537 | if (time_after(expires, ws->timer_expires)) { | 580 | if (!ws->timer_expires || time_after(expires, ws->timer_expires)) { |
538 | mod_timer(&ws->timer, expires); | 581 | mod_timer(&ws->timer, expires); |
539 | ws->timer_expires = expires; | 582 | ws->timer_expires = expires; |
540 | } | 583 | } |
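Taken together, the wakeup.c changes above give the timer a precise meaning: __pm_wakeup_event() arms or extends the deactivation timer, __pm_stay_awake() cancels it (so the explicit __pm_relax() becomes the only way out), and pm_wakeup_timer_fn() deactivates the source only when a still-armed timer has genuinely expired. A minimal sketch of how a driver combines the timed and explicit interfaces; the foo_* handlers and the IRQ wiring are hypothetical, not part of this series:

#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

static struct wakeup_source *foo_ws;	/* from wakeup_source_register("foo") */

/* Hardirq half: report the event; the source stays active for at
 * least 100 ms even if the threaded half never gets to run. */
static irqreturn_t foo_irq(int irq, void *dev_id)
{
	__pm_wakeup_event(foo_ws, 100);
	return IRQ_WAKE_THREAD;
}

/* Threaded half: take over explicitly.  __pm_stay_awake() deletes the
 * timer, so only the __pm_relax() below deactivates the source. */
static irqreturn_t foo_irq_thread(int irq, void *dev_id)
{
	__pm_stay_awake(foo_ws);
	/* ... consume the event ... */
	__pm_relax(foo_ws);
	return IRQ_HANDLED;
}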
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index ce4fa0831860..9e14ae6cd49c 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c | |||
@@ -129,9 +129,9 @@ static void do_suspend(void) | |||
129 | printk(KERN_DEBUG "suspending xenstore...\n"); | 129 | printk(KERN_DEBUG "suspending xenstore...\n"); |
130 | xs_suspend(); | 130 | xs_suspend(); |
131 | 131 | ||
132 | err = dpm_suspend_noirq(PMSG_FREEZE); | 132 | err = dpm_suspend_end(PMSG_FREEZE); |
133 | if (err) { | 133 | if (err) { |
134 | printk(KERN_ERR "dpm_suspend_noirq failed: %d\n", err); | 134 | printk(KERN_ERR "dpm_suspend_end failed: %d\n", err); |
135 | goto out_resume; | 135 | goto out_resume; |
136 | } | 136 | } |
137 | 137 | ||
@@ -149,7 +149,7 @@ static void do_suspend(void) | |||
149 | 149 | ||
150 | err = stop_machine(xen_suspend, &si, cpumask_of(0)); | 150 | err = stop_machine(xen_suspend, &si, cpumask_of(0)); |
151 | 151 | ||
152 | dpm_resume_noirq(si.cancelled ? PMSG_THAW : PMSG_RESTORE); | 152 | dpm_resume_start(si.cancelled ? PMSG_THAW : PMSG_RESTORE); |
153 | 153 | ||
154 | if (err) { | 154 | if (err) { |
155 | printk(KERN_ERR "failed to start xen_suspend: %d\n", err); | 155 | printk(KERN_ERR "failed to start xen_suspend: %d\n", err); |
diff --git a/include/linux/pm.h b/include/linux/pm.h index e4982ac3fbbc..d6dd6f612b8d 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h | |||
@@ -110,6 +110,10 @@ typedef struct pm_message { | |||
110 | * Subsystem-level @suspend() is executed for all devices after invoking | 110 | * Subsystem-level @suspend() is executed for all devices after invoking |
111 | * subsystem-level @prepare() for all of them. | 111 | * subsystem-level @prepare() for all of them. |
112 | * | 112 | * |
113 | * @suspend_late: Continue operations started by @suspend(). For a number of | ||
114 | * devices, @suspend_late() may point to the same callback routine as the | ||
115 | * runtime suspend callback. | ||
116 | * | ||
113 | * @resume: Executed after waking the system up from a sleep state in which the | 117 | * @resume: Executed after waking the system up from a sleep state in which the |
114 | * contents of main memory were preserved. The exact action to perform | 118 | * contents of main memory were preserved. The exact action to perform |
115 | * depends on the device's subsystem, but generally the driver is expected | 119 | * depends on the device's subsystem, but generally the driver is expected |
@@ -122,6 +126,10 @@ typedef struct pm_message { | |||
122 | * Subsystem-level @resume() is executed for all devices after invoking | 126 | * Subsystem-level @resume() is executed for all devices after invoking |
123 | * subsystem-level @resume_noirq() for all of them. | 127 | * subsystem-level @resume_noirq() for all of them. |
124 | * | 128 | * |
129 | * @resume_early: Prepare to execute @resume(). For a number of devices, | ||
130 | * @resume_early() may point to the same callback routine as the runtime | ||
131 | * resume callback. | ||
132 | * | ||
125 | * @freeze: Hibernation-specific, executed before creating a hibernation image. | 133 | * @freeze: Hibernation-specific, executed before creating a hibernation image. |
126 | * Analogous to @suspend(), but it should not enable the device to signal | 134 | * Analogous to @suspend(), but it should not enable the device to signal |
127 | * wakeup events or change its power state. The majority of subsystems | 135 | * wakeup events or change its power state. The majority of subsystems |
@@ -131,6 +139,10 @@ typedef struct pm_message { | |||
131 | * Subsystem-level @freeze() is executed for all devices after invoking | 139 | * Subsystem-level @freeze() is executed for all devices after invoking |
132 | * subsystem-level @prepare() for all of them. | 140 | * subsystem-level @prepare() for all of them. |
133 | * | 141 | * |
142 | * @freeze_late: Continue operations started by @freeze(). Analogous to | ||
143 | * @suspend_late(), but it should not enable the device to signal wakeup | ||
144 | * events or change its power state. | ||
145 | * | ||
134 | * @thaw: Hibernation-specific, executed after creating a hibernation image OR | 146 | * @thaw: Hibernation-specific, executed after creating a hibernation image OR |
135 | * if the creation of an image has failed. Also executed after a failing | 147 | * if the creation of an image has failed. Also executed after a failing |
136 | * attempt to restore the contents of main memory from such an image. | 148 | * attempt to restore the contents of main memory from such an image. |
@@ -140,15 +152,23 @@ typedef struct pm_message { | |||
140 | * subsystem-level @thaw_noirq() for all of them. It also may be executed | 152 | * subsystem-level @thaw_noirq() for all of them. It also may be executed |
141 | * directly after @freeze() in case of a transition error. | 153 | * directly after @freeze() in case of a transition error. |
142 | * | 154 | * |
155 | * @thaw_early: Prepare to execute @thaw(). Undo the changes made by the | ||
156 | * preceding @freeze_late(). | ||
157 | * | ||
143 | * @poweroff: Hibernation-specific, executed after saving a hibernation image. | 158 | * @poweroff: Hibernation-specific, executed after saving a hibernation image. |
144 | * Analogous to @suspend(), but it need not save the device's settings in | 159 | * Analogous to @suspend(), but it need not save the device's settings in |
145 | * memory. | 160 | * memory. |
146 | * Subsystem-level @poweroff() is executed for all devices after invoking | 161 | * Subsystem-level @poweroff() is executed for all devices after invoking |
147 | * subsystem-level @prepare() for all of them. | 162 | * subsystem-level @prepare() for all of them. |
148 | * | 163 | * |
164 | * @poweroff_late: Continue operations started by @poweroff(). Analogous to | ||
165 | * @suspend_late(), but it need not save the device's settings in memory. | ||
166 | * | ||
149 | * @restore: Hibernation-specific, executed after restoring the contents of main | 167 | * @restore: Hibernation-specific, executed after restoring the contents of main |
150 | * memory from a hibernation image, analogous to @resume(). | 168 | * memory from a hibernation image, analogous to @resume(). |
151 | * | 169 | * |
170 | * @restore_early: Prepare to execute @restore(), analogous to @resume_early(). | ||
171 | * | ||
152 | * @suspend_noirq: Complete the actions started by @suspend(). Carry out any | 172 | * @suspend_noirq: Complete the actions started by @suspend(). Carry out any |
153 | * additional operations required for suspending the device that might be | 173 | * additional operations required for suspending the device that might be |
154 | * racing with its driver's interrupt handler, which is guaranteed not to | 174 | * racing with its driver's interrupt handler, which is guaranteed not to |
@@ -158,9 +178,10 @@ typedef struct pm_message { | |||
158 | * @suspend_noirq() has returned successfully. If the device can generate | 178 | * @suspend_noirq() has returned successfully. If the device can generate |
159 | * system wakeup signals and is enabled to wake up the system, it should be | 179 | * system wakeup signals and is enabled to wake up the system, it should be |
160 | * configured to do so at that time. However, depending on the platform | 180 | * configured to do so at that time. However, depending on the platform |
161 | * and device's subsystem, @suspend() may be allowed to put the device into | 181 | * and device's subsystem, @suspend() or @suspend_late() may be allowed to |
162 | * the low-power state and configure it to generate wakeup signals, in | 182 | * put the device into the low-power state and configure it to generate |
163 | * which case it generally is not necessary to define @suspend_noirq(). | 183 | * wakeup signals, in which case it generally is not necessary to define |
184 | * @suspend_noirq(). | ||
164 | * | 185 | * |
165 | * @resume_noirq: Prepare for the execution of @resume() by carrying out any | 186 | * @resume_noirq: Prepare for the execution of @resume() by carrying out any |
166 | * operations required for resuming the device that might be racing with | 187 | * operations required for resuming the device that might be racing with |
@@ -171,9 +192,9 @@ typedef struct pm_message { | |||
171 | * additional operations required for freezing the device that might be | 192 | * additional operations required for freezing the device that might be |
172 | * racing with its driver's interrupt handler, which is guaranteed not to | 193 | * racing with its driver's interrupt handler, which is guaranteed not to |
173 | * run while @freeze_noirq() is being executed. | 194 | * run while @freeze_noirq() is being executed. |
174 | * The power state of the device should not be changed by either @freeze() | 195 | * The power state of the device should not be changed by @freeze(), |
175 | * or @freeze_noirq() and it should not be configured to signal system | 196 | * @freeze_late(), or @freeze_noirq(), and it should not be configured to |
176 | * wakeup by any of these callbacks. | 197 | * signal system wakeup by any of these callbacks. |
177 | * | 198 | * |
178 | * @thaw_noirq: Prepare for the execution of @thaw() by carrying out any | 199 | * @thaw_noirq: Prepare for the execution of @thaw() by carrying out any |
179 | * operations required for thawing the device that might be racing with its | 200 | * operations required for thawing the device that might be racing with its |
@@ -249,6 +270,12 @@ struct dev_pm_ops { | |||
249 | int (*thaw)(struct device *dev); | 270 | int (*thaw)(struct device *dev); |
250 | int (*poweroff)(struct device *dev); | 271 | int (*poweroff)(struct device *dev); |
251 | int (*restore)(struct device *dev); | 272 | int (*restore)(struct device *dev); |
273 | int (*suspend_late)(struct device *dev); | ||
274 | int (*resume_early)(struct device *dev); | ||
275 | int (*freeze_late)(struct device *dev); | ||
276 | int (*thaw_early)(struct device *dev); | ||
277 | int (*poweroff_late)(struct device *dev); | ||
278 | int (*restore_early)(struct device *dev); | ||
252 | int (*suspend_noirq)(struct device *dev); | 279 | int (*suspend_noirq)(struct device *dev); |
253 | int (*resume_noirq)(struct device *dev); | 280 | int (*resume_noirq)(struct device *dev); |
254 | int (*freeze_noirq)(struct device *dev); | 281 | int (*freeze_noirq)(struct device *dev); |
@@ -293,6 +320,15 @@ const struct dev_pm_ops name = { \ | |||
293 | /* | 320 | /* |
294 | * Use this for defining a set of PM operations to be used in all situations | 321 | * Use this for defining a set of PM operations to be used in all situations |
295 | * (system suspend, hibernation or runtime PM). | 322 | * (system suspend, hibernation or runtime PM). |
323 | * NOTE: In general, system suspend callbacks, .suspend() and .resume(), should | ||
324 | * be different from the corresponding runtime PM callbacks, .runtime_suspend() | ||
325 | * and .runtime_resume(), because .runtime_suspend() always works on an already | ||
326 | * quiescent device, while .suspend() should assume that the device may be doing | ||
327 | * something when it is called (it should ensure that the device will be | ||
328 | * quiescent after it has returned). Therefore it's better to point the "late" | ||
329 | * suspend and "early" resume callback pointers, .suspend_late() and | ||
330 | * .resume_early(), to the same routines as .runtime_suspend() and | ||
331 | * .runtime_resume(), respectively (and analogously for hibernation). | ||
296 | */ | 332 | */ |
297 | #define UNIVERSAL_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \ | 333 | #define UNIVERSAL_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \ |
298 | const struct dev_pm_ops name = { \ | 334 | const struct dev_pm_ops name = { \ |
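To make the NOTE above concrete, a driver would wire the new fields up roughly as follows; the foo_* routines are hypothetical placeholders (a sketch, not code from this series):

#include <linux/pm.h>

/* Runtime callbacks: always run on an already quiescent device. */
static int foo_runtime_suspend(struct device *dev) { return 0; }
static int foo_runtime_resume(struct device *dev) { return 0; }

/* System sleep callbacks: must first quiesce a possibly busy device. */
static int foo_suspend(struct device *dev) { return 0; }
static int foo_resume(struct device *dev) { return 0; }

static const struct dev_pm_ops foo_pm_ops = {
	.suspend	 = foo_suspend,
	.resume		 = foo_resume,
	/* Per the NOTE: reuse the runtime routines for the new phases. */
	.suspend_late	 = foo_runtime_suspend,
	.resume_early	 = foo_runtime_resume,
	.runtime_suspend = foo_runtime_suspend,
	.runtime_resume	 = foo_runtime_resume,
};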
@@ -584,13 +620,13 @@ struct dev_pm_domain { | |||
584 | 620 | ||
585 | #ifdef CONFIG_PM_SLEEP | 621 | #ifdef CONFIG_PM_SLEEP |
586 | extern void device_pm_lock(void); | 622 | extern void device_pm_lock(void); |
587 | extern void dpm_resume_noirq(pm_message_t state); | 623 | extern void dpm_resume_start(pm_message_t state); |
588 | extern void dpm_resume_end(pm_message_t state); | 624 | extern void dpm_resume_end(pm_message_t state); |
589 | extern void dpm_resume(pm_message_t state); | 625 | extern void dpm_resume(pm_message_t state); |
590 | extern void dpm_complete(pm_message_t state); | 626 | extern void dpm_complete(pm_message_t state); |
591 | 627 | ||
592 | extern void device_pm_unlock(void); | 628 | extern void device_pm_unlock(void); |
593 | extern int dpm_suspend_noirq(pm_message_t state); | 629 | extern int dpm_suspend_end(pm_message_t state); |
594 | extern int dpm_suspend_start(pm_message_t state); | 630 | extern int dpm_suspend_start(pm_message_t state); |
595 | extern int dpm_suspend(pm_message_t state); | 631 | extern int dpm_suspend(pm_message_t state); |
596 | extern int dpm_prepare(pm_message_t state); | 632 | extern int dpm_prepare(pm_message_t state); |
@@ -605,17 +641,23 @@ extern void __suspend_report_result(const char *function, void *fn, int ret); | |||
605 | extern int device_pm_wait_for_dev(struct device *sub, struct device *dev); | 641 | extern int device_pm_wait_for_dev(struct device *sub, struct device *dev); |
606 | 642 | ||
607 | extern int pm_generic_prepare(struct device *dev); | 643 | extern int pm_generic_prepare(struct device *dev); |
644 | extern int pm_generic_suspend_late(struct device *dev); | ||
608 | extern int pm_generic_suspend_noirq(struct device *dev); | 645 | extern int pm_generic_suspend_noirq(struct device *dev); |
609 | extern int pm_generic_suspend(struct device *dev); | 646 | extern int pm_generic_suspend(struct device *dev); |
647 | extern int pm_generic_resume_early(struct device *dev); | ||
610 | extern int pm_generic_resume_noirq(struct device *dev); | 648 | extern int pm_generic_resume_noirq(struct device *dev); |
611 | extern int pm_generic_resume(struct device *dev); | 649 | extern int pm_generic_resume(struct device *dev); |
612 | extern int pm_generic_freeze_noirq(struct device *dev); | 650 | extern int pm_generic_freeze_noirq(struct device *dev); |
651 | extern int pm_generic_freeze_late(struct device *dev); | ||
613 | extern int pm_generic_freeze(struct device *dev); | 652 | extern int pm_generic_freeze(struct device *dev); |
614 | extern int pm_generic_thaw_noirq(struct device *dev); | 653 | extern int pm_generic_thaw_noirq(struct device *dev); |
654 | extern int pm_generic_thaw_early(struct device *dev); | ||
615 | extern int pm_generic_thaw(struct device *dev); | 655 | extern int pm_generic_thaw(struct device *dev); |
616 | extern int pm_generic_restore_noirq(struct device *dev); | 656 | extern int pm_generic_restore_noirq(struct device *dev); |
657 | extern int pm_generic_restore_early(struct device *dev); | ||
617 | extern int pm_generic_restore(struct device *dev); | 658 | extern int pm_generic_restore(struct device *dev); |
618 | extern int pm_generic_poweroff_noirq(struct device *dev); | 659 | extern int pm_generic_poweroff_noirq(struct device *dev); |
660 | extern int pm_generic_poweroff_late(struct device *dev); | ||
619 | extern int pm_generic_poweroff(struct device *dev); | 661 | extern int pm_generic_poweroff(struct device *dev); |
620 | extern void pm_generic_complete(struct device *dev); | 662 | extern void pm_generic_complete(struct device *dev); |
621 | 663 | ||
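Note that dpm_suspend_noirq() and dpm_resume_noirq() are no longer declared here: they become internal halves of the renamed entry points. The implementation lives in drivers/base/power/main.c, which is not part of this excerpt; in outline it composes the phases like this (a sketch, not the verbatim code):

int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);	/* new "late" phase */

	if (error)
		return error;
	return dpm_suspend_noirq(state);	/* former entry point */
}

void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);		/* former entry point */
	dpm_resume_early(state);		/* new "early" phase */
}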
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h index a32da962d693..d9f05113e5fb 100644 --- a/include/linux/pm_wakeup.h +++ b/include/linux/pm_wakeup.h | |||
@@ -41,7 +41,7 @@ | |||
41 | * @active: Status of the wakeup source. | 41 | * @active: Status of the wakeup source. |
42 | */ | 42 | */ |
43 | struct wakeup_source { | 43 | struct wakeup_source { |
44 | char *name; | 44 | const char *name; |
45 | struct list_head entry; | 45 | struct list_head entry; |
46 | spinlock_t lock; | 46 | spinlock_t lock; |
47 | struct timer_list timer; | 47 | struct timer_list timer; |
@@ -73,7 +73,9 @@ static inline bool device_may_wakeup(struct device *dev) | |||
73 | } | 73 | } |
74 | 74 | ||
75 | /* drivers/base/power/wakeup.c */ | 75 | /* drivers/base/power/wakeup.c */ |
76 | extern void wakeup_source_prepare(struct wakeup_source *ws, const char *name); | ||
76 | extern struct wakeup_source *wakeup_source_create(const char *name); | 77 | extern struct wakeup_source *wakeup_source_create(const char *name); |
78 | extern void wakeup_source_drop(struct wakeup_source *ws); | ||
77 | extern void wakeup_source_destroy(struct wakeup_source *ws); | 79 | extern void wakeup_source_destroy(struct wakeup_source *ws); |
78 | extern void wakeup_source_add(struct wakeup_source *ws); | 80 | extern void wakeup_source_add(struct wakeup_source *ws); |
79 | extern void wakeup_source_remove(struct wakeup_source *ws); | 81 | extern void wakeup_source_remove(struct wakeup_source *ws); |
@@ -103,11 +105,16 @@ static inline bool device_can_wakeup(struct device *dev) | |||
103 | return dev->power.can_wakeup; | 105 | return dev->power.can_wakeup; |
104 | } | 106 | } |
105 | 107 | ||
108 | static inline void wakeup_source_prepare(struct wakeup_source *ws, | ||
109 | const char *name) {} | ||
110 | |||
106 | static inline struct wakeup_source *wakeup_source_create(const char *name) | 111 | static inline struct wakeup_source *wakeup_source_create(const char *name) |
107 | { | 112 | { |
108 | return NULL; | 113 | return NULL; |
109 | } | 114 | } |
110 | 115 | ||
116 | static inline void wakeup_source_drop(struct wakeup_source *ws) {} | ||
117 | |||
111 | static inline void wakeup_source_destroy(struct wakeup_source *ws) {} | 118 | static inline void wakeup_source_destroy(struct wakeup_source *ws) {} |
112 | 119 | ||
113 | static inline void wakeup_source_add(struct wakeup_source *ws) {} | 120 | static inline void wakeup_source_add(struct wakeup_source *ws) {} |
@@ -165,4 +172,17 @@ static inline void pm_wakeup_event(struct device *dev, unsigned int msec) {} | |||
165 | 172 | ||
166 | #endif /* !CONFIG_PM_SLEEP */ | 173 | #endif /* !CONFIG_PM_SLEEP */ |
167 | 174 | ||
175 | static inline void wakeup_source_init(struct wakeup_source *ws, | ||
176 | const char *name) | ||
177 | { | ||
178 | wakeup_source_prepare(ws, name); | ||
179 | wakeup_source_add(ws); | ||
180 | } | ||
181 | |||
182 | static inline void wakeup_source_trash(struct wakeup_source *ws) | ||
183 | { | ||
184 | wakeup_source_remove(ws); | ||
185 | wakeup_source_drop(ws); | ||
186 | } | ||
187 | |||
168 | #endif /* _LINUX_PM_WAKEUP_H */ | 188 | #endif /* _LINUX_PM_WAKEUP_H */ |
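The two new inline helpers give statically allocated (e.g. embedded) wakeup sources a register/unregister-style life cycle without the allocation and freeing that wakeup_source_register()/wakeup_source_unregister() imply. A hypothetical user, for illustration only:

#include <linux/pm_wakeup.h>

struct foo_device {
	struct wakeup_source ws;	/* embedded, not heap-allocated */
	/* ... */
};

static void foo_setup(struct foo_device *foo)
{
	wakeup_source_init(&foo->ws, "foo");	/* prepare + add */
}

static void foo_teardown(struct foo_device *foo)
{
	wakeup_source_trash(&foo->ws);		/* remove + drop */
}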
diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 91784a4f8608..ac1c114c499d 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h | |||
@@ -42,8 +42,10 @@ enum suspend_stat_step { | |||
42 | SUSPEND_FREEZE = 1, | 42 | SUSPEND_FREEZE = 1, |
43 | SUSPEND_PREPARE, | 43 | SUSPEND_PREPARE, |
44 | SUSPEND_SUSPEND, | 44 | SUSPEND_SUSPEND, |
45 | SUSPEND_SUSPEND_LATE, | ||
45 | SUSPEND_SUSPEND_NOIRQ, | 46 | SUSPEND_SUSPEND_NOIRQ, |
46 | SUSPEND_RESUME_NOIRQ, | 47 | SUSPEND_RESUME_NOIRQ, |
48 | SUSPEND_RESUME_EARLY, | ||
47 | SUSPEND_RESUME | 49 | SUSPEND_RESUME |
48 | }; | 50 | }; |
49 | 51 | ||
@@ -53,8 +55,10 @@ struct suspend_stats { | |||
53 | int failed_freeze; | 55 | int failed_freeze; |
54 | int failed_prepare; | 56 | int failed_prepare; |
55 | int failed_suspend; | 57 | int failed_suspend; |
58 | int failed_suspend_late; | ||
56 | int failed_suspend_noirq; | 59 | int failed_suspend_noirq; |
57 | int failed_resume; | 60 | int failed_resume; |
61 | int failed_resume_early; | ||
58 | int failed_resume_noirq; | 62 | int failed_resume_noirq; |
59 | #define REC_FAILED_NUM 2 | 63 | #define REC_FAILED_NUM 2 |
60 | int last_failed_dev; | 64 | int last_failed_dev; |
diff --git a/kernel/exit.c b/kernel/exit.c index 4b4042f9bc6a..8e6b0e626b98 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -424,7 +424,7 @@ void daemonize(const char *name, ...) | |||
424 | */ | 424 | */ |
425 | exit_mm(current); | 425 | exit_mm(current); |
426 | /* | 426 | /* |
427 | * We don't want to have TIF_FREEZE set if the system-wide hibernation | 427 | * We don't want to get frozen in case a system-wide hibernation |
428 | * or suspend transition begins right now. | 428 | * or suspend transition begins right now. |
429 | */ | 429 | */ |
430 | current->flags |= (PF_NOFREEZE | PF_KTHREAD); | 430 | current->flags |= (PF_NOFREEZE | PF_KTHREAD); |
diff --git a/kernel/freezer.c b/kernel/freezer.c index 9815b8d1eed5..11f82a4d4eae 100644 --- a/kernel/freezer.c +++ b/kernel/freezer.c | |||
@@ -99,9 +99,9 @@ static void fake_signal_wake_up(struct task_struct *p) | |||
99 | * freeze_task - send a freeze request to given task | 99 | * freeze_task - send a freeze request to given task |
100 | * @p: task to send the request to | 100 | * @p: task to send the request to |
101 | * | 101 | * |
102 | * If @p is freezing, the freeze request is sent by setting %TIF_FREEZE | 102 | * If @p is freezing, the freeze request is sent either by sending a fake |
103 | * flag and either sending a fake signal to it or waking it up, depending | 103 | * signal (if it's not a kernel thread) or waking it up (if it's a kernel |
104 | * on whether it has %PF_FREEZER_NOSIG set. | 104 | * thread). |
105 | * | 105 | * |
106 | * RETURNS: | 106 | * RETURNS: |
107 | * %false, if @p is not freezing or already frozen; %true, otherwise | 107 | * %false, if @p is not freezing or already frozen; %true, otherwise |
diff --git a/kernel/kexec.c b/kernel/kexec.c index 7b0886786701..a6a675cb9818 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c | |||
@@ -1546,13 +1546,13 @@ int kernel_kexec(void) | |||
1546 | if (error) | 1546 | if (error) |
1547 | goto Resume_console; | 1547 | goto Resume_console; |
1548 | /* At this point, dpm_suspend_start() has been called, | 1548 | /* At this point, dpm_suspend_start() has been called, |
1549 | * but *not* dpm_suspend_noirq(). We *must* call | 1549 | * but *not* dpm_suspend_end(). We *must* call |
1550 | * dpm_suspend_noirq() now. Otherwise, drivers for | 1550 | * dpm_suspend_end() now. Otherwise, drivers for |
1551 | * some devices (e.g. interrupt controllers) become | 1551 | * some devices (e.g. interrupt controllers) become |
1552 | * desynchronized with the actual state of the | 1552 | * desynchronized with the actual state of the |
1553 | * hardware at resume time, and evil weirdness ensues. | 1553 | * hardware at resume time, and evil weirdness ensues. |
1554 | */ | 1554 | */ |
1555 | error = dpm_suspend_noirq(PMSG_FREEZE); | 1555 | error = dpm_suspend_end(PMSG_FREEZE); |
1556 | if (error) | 1556 | if (error) |
1557 | goto Resume_devices; | 1557 | goto Resume_devices; |
1558 | error = disable_nonboot_cpus(); | 1558 | error = disable_nonboot_cpus(); |
@@ -1579,7 +1579,7 @@ int kernel_kexec(void) | |||
1579 | local_irq_enable(); | 1579 | local_irq_enable(); |
1580 | Enable_cpus: | 1580 | Enable_cpus: |
1581 | enable_nonboot_cpus(); | 1581 | enable_nonboot_cpus(); |
1582 | dpm_resume_noirq(PMSG_RESTORE); | 1582 | dpm_resume_start(PMSG_RESTORE); |
1583 | Resume_devices: | 1583 | Resume_devices: |
1584 | dpm_resume_end(PMSG_RESTORE); | 1584 | dpm_resume_end(PMSG_RESTORE); |
1585 | Resume_console: | 1585 | Resume_console: |
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 6d6d28870335..0a186cfde788 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c | |||
@@ -245,8 +245,8 @@ void swsusp_show_speed(struct timeval *start, struct timeval *stop, | |||
245 | * create_image - Create a hibernation image. | 245 | * create_image - Create a hibernation image. |
246 | * @platform_mode: Whether or not to use the platform driver. | 246 | * @platform_mode: Whether or not to use the platform driver. |
247 | * | 247 | * |
248 | * Execute device drivers' .freeze_noirq() callbacks, create a hibernation image | 248 | * Execute device drivers' "late" and "noirq" freeze callbacks, create a |
249 | * and execute the drivers' .thaw_noirq() callbacks. | 249 | * hibernation image and run the drivers' "noirq" and "early" thaw callbacks. |
250 | * | 250 | * |
251 | * Control reappears in this routine after the subsequent restore. | 251 | * Control reappears in this routine after the subsequent restore. |
252 | */ | 252 | */ |
@@ -254,7 +254,7 @@ static int create_image(int platform_mode) | |||
254 | { | 254 | { |
255 | int error; | 255 | int error; |
256 | 256 | ||
257 | error = dpm_suspend_noirq(PMSG_FREEZE); | 257 | error = dpm_suspend_end(PMSG_FREEZE); |
258 | if (error) { | 258 | if (error) { |
259 | printk(KERN_ERR "PM: Some devices failed to power down, " | 259 | printk(KERN_ERR "PM: Some devices failed to power down, " |
260 | "aborting hibernation\n"); | 260 | "aborting hibernation\n"); |
@@ -306,7 +306,7 @@ static int create_image(int platform_mode) | |||
306 | Platform_finish: | 306 | Platform_finish: |
307 | platform_finish(platform_mode); | 307 | platform_finish(platform_mode); |
308 | 308 | ||
309 | dpm_resume_noirq(in_suspend ? | 309 | dpm_resume_start(in_suspend ? |
310 | (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); | 310 | (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); |
311 | 311 | ||
312 | return error; | 312 | return error; |
@@ -343,13 +343,13 @@ int hibernation_snapshot(int platform_mode) | |||
343 | * successful freezer test. | 343 | * successful freezer test. |
344 | */ | 344 | */ |
345 | freezer_test_done = true; | 345 | freezer_test_done = true; |
346 | goto Cleanup; | 346 | goto Thaw; |
347 | } | 347 | } |
348 | 348 | ||
349 | error = dpm_prepare(PMSG_FREEZE); | 349 | error = dpm_prepare(PMSG_FREEZE); |
350 | if (error) { | 350 | if (error) { |
351 | dpm_complete(PMSG_RECOVER); | 351 | dpm_complete(PMSG_RECOVER); |
352 | goto Cleanup; | 352 | goto Thaw; |
353 | } | 353 | } |
354 | 354 | ||
355 | suspend_console(); | 355 | suspend_console(); |
@@ -385,6 +385,8 @@ int hibernation_snapshot(int platform_mode) | |||
385 | platform_end(platform_mode); | 385 | platform_end(platform_mode); |
386 | return error; | 386 | return error; |
387 | 387 | ||
388 | Thaw: | ||
389 | thaw_kernel_threads(); | ||
388 | Cleanup: | 390 | Cleanup: |
389 | swsusp_free(); | 391 | swsusp_free(); |
390 | goto Close; | 392 | goto Close; |
@@ -394,16 +396,16 @@ int hibernation_snapshot(int platform_mode) | |||
394 | * resume_target_kernel - Restore system state from a hibernation image. | 396 | * resume_target_kernel - Restore system state from a hibernation image. |
395 | * @platform_mode: Whether or not to use the platform driver. | 397 | * @platform_mode: Whether or not to use the platform driver. |
396 | * | 398 | * |
397 | * Execute device drivers' .freeze_noirq() callbacks, restore the contents of | 399 | * Execute device drivers' "late" and "noirq" freeze callbacks, restore the |
398 | * highmem that have not been restored yet from the image and run the low-level | 400 | * contents of highmem that have not been restored yet from the image and run |
399 | * code that will restore the remaining contents of memory and switch to the | 401 | * the low-level code that will restore the remaining contents of memory and |
400 | * just restored target kernel. | 402 | * switch to the just restored target kernel. |
401 | */ | 403 | */ |
402 | static int resume_target_kernel(bool platform_mode) | 404 | static int resume_target_kernel(bool platform_mode) |
403 | { | 405 | { |
404 | int error; | 406 | int error; |
405 | 407 | ||
406 | error = dpm_suspend_noirq(PMSG_QUIESCE); | 408 | error = dpm_suspend_end(PMSG_QUIESCE); |
407 | if (error) { | 409 | if (error) { |
408 | printk(KERN_ERR "PM: Some devices failed to power down, " | 410 | printk(KERN_ERR "PM: Some devices failed to power down, " |
409 | "aborting resume\n"); | 411 | "aborting resume\n"); |
@@ -460,7 +462,7 @@ static int resume_target_kernel(bool platform_mode) | |||
460 | Cleanup: | 462 | Cleanup: |
461 | platform_restore_cleanup(platform_mode); | 463 | platform_restore_cleanup(platform_mode); |
462 | 464 | ||
463 | dpm_resume_noirq(PMSG_RECOVER); | 465 | dpm_resume_start(PMSG_RECOVER); |
464 | 466 | ||
465 | return error; | 467 | return error; |
466 | } | 468 | } |
@@ -518,7 +520,7 @@ int hibernation_platform_enter(void) | |||
518 | goto Resume_devices; | 520 | goto Resume_devices; |
519 | } | 521 | } |
520 | 522 | ||
521 | error = dpm_suspend_noirq(PMSG_HIBERNATE); | 523 | error = dpm_suspend_end(PMSG_HIBERNATE); |
522 | if (error) | 524 | if (error) |
523 | goto Resume_devices; | 525 | goto Resume_devices; |
524 | 526 | ||
@@ -549,7 +551,7 @@ int hibernation_platform_enter(void) | |||
549 | Platform_finish: | 551 | Platform_finish: |
550 | hibernation_ops->finish(); | 552 | hibernation_ops->finish(); |
551 | 553 | ||
552 | dpm_resume_noirq(PMSG_RESTORE); | 554 | dpm_resume_start(PMSG_RESTORE); |
553 | 555 | ||
554 | Resume_devices: | 556 | Resume_devices: |
555 | entering_platform_hibernation = false; | 557 | entering_platform_hibernation = false; |
@@ -616,7 +618,7 @@ int hibernate(void) | |||
616 | /* Allocate memory management structures */ | 618 | /* Allocate memory management structures */ |
617 | error = create_basic_memory_bitmaps(); | 619 | error = create_basic_memory_bitmaps(); |
618 | if (error) | 620 | if (error) |
619 | goto Exit; | 621 | goto Enable_umh; |
620 | 622 | ||
621 | printk(KERN_INFO "PM: Syncing filesystems ... "); | 623 | printk(KERN_INFO "PM: Syncing filesystems ... "); |
622 | sys_sync(); | 624 | sys_sync(); |
@@ -624,15 +626,11 @@ int hibernate(void) | |||
624 | 626 | ||
625 | error = freeze_processes(); | 627 | error = freeze_processes(); |
626 | if (error) | 628 | if (error) |
627 | goto Finish; | 629 | goto Free_bitmaps; |
628 | 630 | ||
629 | error = hibernation_snapshot(hibernation_mode == HIBERNATION_PLATFORM); | 631 | error = hibernation_snapshot(hibernation_mode == HIBERNATION_PLATFORM); |
630 | if (error) | 632 | if (error || freezer_test_done) |
631 | goto Thaw; | ||
632 | if (freezer_test_done) { | ||
633 | freezer_test_done = false; | ||
634 | goto Thaw; | 633 | goto Thaw; |
635 | } | ||
636 | 634 | ||
637 | if (in_suspend) { | 635 | if (in_suspend) { |
638 | unsigned int flags = 0; | 636 | unsigned int flags = 0; |
@@ -657,8 +655,13 @@ int hibernate(void) | |||
657 | 655 | ||
658 | Thaw: | 656 | Thaw: |
659 | thaw_processes(); | 657 | thaw_processes(); |
660 | Finish: | 658 | |
659 | /* Don't bother checking whether freezer_test_done is true */ | ||
660 | freezer_test_done = false; | ||
661 | |||
662 | Free_bitmaps: | ||
661 | free_basic_memory_bitmaps(); | 663 | free_basic_memory_bitmaps(); |
664 | Enable_umh: | ||
662 | usermodehelper_enable(); | 665 | usermodehelper_enable(); |
663 | Exit: | 666 | Exit: |
664 | pm_notifier_call_chain(PM_POST_HIBERNATION); | 667 | pm_notifier_call_chain(PM_POST_HIBERNATION); |
diff --git a/kernel/power/main.c b/kernel/power/main.c index 9824b41e5a18..1c12581f1c62 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c | |||
@@ -165,16 +165,20 @@ static int suspend_stats_show(struct seq_file *s, void *unused) | |||
165 | last_errno %= REC_FAILED_NUM; | 165 | last_errno %= REC_FAILED_NUM; |
166 | last_step = suspend_stats.last_failed_step + REC_FAILED_NUM - 1; | 166 | last_step = suspend_stats.last_failed_step + REC_FAILED_NUM - 1; |
167 | last_step %= REC_FAILED_NUM; | 167 | last_step %= REC_FAILED_NUM; |
168 | seq_printf(s, "%s: %d\n%s: %d\n%s: %d\n%s: %d\n" | 168 | seq_printf(s, "%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n" |
169 | "%s: %d\n%s: %d\n%s: %d\n%s: %d\n", | 169 | "%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n", |
170 | "success", suspend_stats.success, | 170 | "success", suspend_stats.success, |
171 | "fail", suspend_stats.fail, | 171 | "fail", suspend_stats.fail, |
172 | "failed_freeze", suspend_stats.failed_freeze, | 172 | "failed_freeze", suspend_stats.failed_freeze, |
173 | "failed_prepare", suspend_stats.failed_prepare, | 173 | "failed_prepare", suspend_stats.failed_prepare, |
174 | "failed_suspend", suspend_stats.failed_suspend, | 174 | "failed_suspend", suspend_stats.failed_suspend, |
175 | "failed_suspend_late", | ||
176 | suspend_stats.failed_suspend_late, | ||
175 | "failed_suspend_noirq", | 177 | "failed_suspend_noirq", |
176 | suspend_stats.failed_suspend_noirq, | 178 | suspend_stats.failed_suspend_noirq, |
177 | "failed_resume", suspend_stats.failed_resume, | 179 | "failed_resume", suspend_stats.failed_resume, |
180 | "failed_resume_early", | ||
181 | suspend_stats.failed_resume_early, | ||
178 | "failed_resume_noirq", | 182 | "failed_resume_noirq", |
179 | suspend_stats.failed_resume_noirq); | 183 | suspend_stats.failed_resume_noirq); |
180 | seq_printf(s, "failures:\n last_failed_dev:\t%-s\n", | 184 | seq_printf(s, "failures:\n last_failed_dev:\t%-s\n", |
@@ -287,16 +291,10 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr, | |||
287 | 291 | ||
288 | #ifdef CONFIG_SUSPEND | 292 | #ifdef CONFIG_SUSPEND |
289 | for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++) { | 293 | for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++) { |
290 | if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) | 294 | if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) { |
295 | error = pm_suspend(state); | ||
291 | break; | 296 | break; |
292 | } | 297 | } |
293 | if (state < PM_SUSPEND_MAX && *s) { | ||
294 | error = enter_state(state); | ||
295 | if (error) { | ||
296 | suspend_stats.fail++; | ||
297 | dpm_save_failed_errno(error); | ||
298 | } else | ||
299 | suspend_stats.success++; | ||
300 | } | 298 | } |
301 | #endif | 299 | #endif |
302 | 300 | ||
diff --git a/kernel/power/power.h b/kernel/power/power.h index 21724eee5206..98f3622d7407 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h | |||
@@ -177,13 +177,11 @@ extern const char *const pm_states[]; | |||
177 | 177 | ||
178 | extern bool valid_state(suspend_state_t state); | 178 | extern bool valid_state(suspend_state_t state); |
179 | extern int suspend_devices_and_enter(suspend_state_t state); | 179 | extern int suspend_devices_and_enter(suspend_state_t state); |
180 | extern int enter_state(suspend_state_t state); | ||
181 | #else /* !CONFIG_SUSPEND */ | 180 | #else /* !CONFIG_SUSPEND */ |
182 | static inline int suspend_devices_and_enter(suspend_state_t state) | 181 | static inline int suspend_devices_and_enter(suspend_state_t state) |
183 | { | 182 | { |
184 | return -ENOSYS; | 183 | return -ENOSYS; |
185 | } | 184 | } |
186 | static inline int enter_state(suspend_state_t state) { return -ENOSYS; } | ||
187 | static inline bool valid_state(suspend_state_t state) { return false; } | 185 | static inline bool valid_state(suspend_state_t state) { return false; } |
188 | #endif /* !CONFIG_SUSPEND */ | 186 | #endif /* !CONFIG_SUSPEND */ |
189 | 187 | ||
@@ -234,16 +232,14 @@ static inline int suspend_freeze_processes(void) | |||
234 | int error; | 232 | int error; |
235 | 233 | ||
236 | error = freeze_processes(); | 234 | error = freeze_processes(); |
237 | |||
238 | /* | 235 | /* |
239 | * freeze_processes() automatically thaws every task if freezing | 236 | * freeze_processes() automatically thaws every task if freezing |
240 | * fails. So we need not do anything extra upon error. | 237 | * fails. So we need not do anything extra upon error. |
241 | */ | 238 | */ |
242 | if (error) | 239 | if (error) |
243 | goto Finish; | 240 | return error; |
244 | 241 | ||
245 | error = freeze_kernel_threads(); | 242 | error = freeze_kernel_threads(); |
246 | |||
247 | /* | 243 | /* |
248 | * freeze_kernel_threads() thaws only kernel threads upon freezing | 244 | * freeze_kernel_threads() thaws only kernel threads upon freezing |
249 | * failure. So we have to thaw the userspace tasks ourselves. | 245 | * failure. So we have to thaw the userspace tasks ourselves. |
@@ -251,7 +247,6 @@ static inline int suspend_freeze_processes(void) | |||
251 | if (error) | 247 | if (error) |
252 | thaw_processes(); | 248 | thaw_processes(); |
253 | 249 | ||
254 | Finish: | ||
255 | return error; | 250 | return error; |
256 | } | 251 | } |
257 | 252 | ||
diff --git a/kernel/power/process.c b/kernel/power/process.c index 7e426459e60a..0d2aeb226108 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c | |||
@@ -53,11 +53,9 @@ static int try_to_freeze_tasks(bool user_only) | |||
53 | * It is "frozen enough". If the task does wake | 53 | * It is "frozen enough". If the task does wake |
54 | * up, it will immediately call try_to_freeze. | 54 | * up, it will immediately call try_to_freeze. |
55 | * | 55 | * |
56 | * Because freeze_task() goes through p's | 56 | * Because freeze_task() goes through p's scheduler lock, it's |
57 | * scheduler lock after setting TIF_FREEZE, it's | 57 | * guaranteed that TASK_STOPPED/TRACED -> TASK_RUNNING |
58 | * guaranteed that either we see TASK_RUNNING or | 58 | * transition can't race with task state testing here. |
59 | * try_to_stop() after schedule() in ptrace/signal | ||
60 | * stop sees TIF_FREEZE. | ||
61 | */ | 59 | */ |
62 | if (!task_is_stopped_or_traced(p) && | 60 | if (!task_is_stopped_or_traced(p) && |
63 | !freezer_should_skip(p)) | 61 | !freezer_should_skip(p)) |
@@ -98,13 +96,15 @@ static int try_to_freeze_tasks(bool user_only) | |||
98 | elapsed_csecs / 100, elapsed_csecs % 100, | 96 | elapsed_csecs / 100, elapsed_csecs % 100, |
99 | todo - wq_busy, wq_busy); | 97 | todo - wq_busy, wq_busy); |
100 | 98 | ||
101 | read_lock(&tasklist_lock); | 99 | if (!wakeup) { |
102 | do_each_thread(g, p) { | 100 | read_lock(&tasklist_lock); |
103 | if (!wakeup && !freezer_should_skip(p) && | 101 | do_each_thread(g, p) { |
104 | p != current && freezing(p) && !frozen(p)) | 102 | if (p != current && !freezer_should_skip(p) |
105 | sched_show_task(p); | 103 | && freezing(p) && !frozen(p)) |
106 | } while_each_thread(g, p); | 104 | sched_show_task(p); |
107 | read_unlock(&tasklist_lock); | 105 | } while_each_thread(g, p); |
106 | read_unlock(&tasklist_lock); | ||
107 | } | ||
108 | } else { | 108 | } else { |
109 | printk("(elapsed %d.%02d seconds) ", elapsed_csecs / 100, | 109 | printk("(elapsed %d.%02d seconds) ", elapsed_csecs / 100, |
110 | elapsed_csecs % 100); | 110 | elapsed_csecs % 100); |
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 6a768e537001..8e2e7461375f 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c | |||
@@ -711,9 +711,10 @@ static void mark_nosave_pages(struct memory_bitmap *bm) | |||
711 | list_for_each_entry(region, &nosave_regions, list) { | 711 | list_for_each_entry(region, &nosave_regions, list) { |
712 | unsigned long pfn; | 712 | unsigned long pfn; |
713 | 713 | ||
714 | pr_debug("PM: Marking nosave pages: %016lx - %016lx\n", | 714 | pr_debug("PM: Marking nosave pages: [mem %#010llx-%#010llx]\n", |
715 | region->start_pfn << PAGE_SHIFT, | 715 | (unsigned long long) region->start_pfn << PAGE_SHIFT, |
716 | region->end_pfn << PAGE_SHIFT); | 716 | ((unsigned long long) region->end_pfn << PAGE_SHIFT) |
717 | - 1); | ||
717 | 718 | ||
718 | for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++) | 719 | for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++) |
719 | if (pfn_valid(pfn)) { | 720 | if (pfn_valid(pfn)) { |
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 4fd51beed879..88e5c967370d 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c | |||
@@ -37,8 +37,8 @@ const char *const pm_states[PM_SUSPEND_MAX] = { | |||
37 | static const struct platform_suspend_ops *suspend_ops; | 37 | static const struct platform_suspend_ops *suspend_ops; |
38 | 38 | ||
39 | /** | 39 | /** |
40 | * suspend_set_ops - Set the global suspend method table. | 40 | * suspend_set_ops - Set the global suspend method table. |
41 | * @ops: Pointer to ops structure. | 41 | * @ops: Suspend operations to use. |
42 | */ | 42 | */ |
43 | void suspend_set_ops(const struct platform_suspend_ops *ops) | 43 | void suspend_set_ops(const struct platform_suspend_ops *ops) |
44 | { | 44 | { |
@@ -58,11 +58,11 @@ bool valid_state(suspend_state_t state) | |||
58 | } | 58 | } |
59 | 59 | ||
60 | /** | 60 | /** |
61 | * suspend_valid_only_mem - generic memory-only valid callback | 61 | * suspend_valid_only_mem - Generic memory-only valid callback. |
62 | * | 62 | * |
63 | * Platform drivers that implement mem suspend only and only need | 63 | * Platform drivers that implement mem suspend only and only need to check for |
64 | * to check for that in their .valid callback can use this instead | 64 | * that in their .valid() callback can use this instead of rolling their own |
65 | * of rolling their own .valid callback. | 65 | * .valid() callback. |
66 | */ | 66 | */ |
67 | int suspend_valid_only_mem(suspend_state_t state) | 67 | int suspend_valid_only_mem(suspend_state_t state) |
68 | { | 68 | { |
@@ -83,10 +83,11 @@ static int suspend_test(int level) | |||
83 | } | 83 | } |
84 | 84 | ||
85 | /** | 85 | /** |
86 | * suspend_prepare - Do prep work before entering low-power state. | 86 | * suspend_prepare - Prepare for entering system sleep state. |
87 | * | 87 | * |
88 | * This is common code that is called for each state that we're entering. | 88 | * Common code run for every system sleep state that can be entered (except for |
89 | * Run suspend notifiers, allocate a console and stop all processes. | 89 | * hibernation). Run suspend notifiers, allocate the "suspend" console and |
90 | * freeze processes. | ||
90 | */ | 91 | */ |
91 | static int suspend_prepare(void) | 92 | static int suspend_prepare(void) |
92 | { | 93 | { |
@@ -131,9 +132,9 @@ void __attribute__ ((weak)) arch_suspend_enable_irqs(void) | |||
131 | } | 132 | } |
132 | 133 | ||
133 | /** | 134 | /** |
134 | * suspend_enter - enter the desired system sleep state. | 135 | * suspend_enter - Make the system enter the given sleep state. |
135 | * @state: State to enter | 136 | * @state: System sleep state to enter. |
136 | * @wakeup: Returns information that suspend should not be entered again. | 137 | * @wakeup: Returns information that the sleep state should not be re-entered. |
137 | * | 138 | * |
138 | * This function should be called after devices have been suspended. | 139 | * This function should be called after devices have been suspended. |
139 | */ | 140 | */ |
@@ -147,7 +148,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup) | |||
147 | goto Platform_finish; | 148 | goto Platform_finish; |
148 | } | 149 | } |
149 | 150 | ||
150 | error = dpm_suspend_noirq(PMSG_SUSPEND); | 151 | error = dpm_suspend_end(PMSG_SUSPEND); |
151 | if (error) { | 152 | if (error) { |
152 | printk(KERN_ERR "PM: Some devices failed to power down\n"); | 153 | printk(KERN_ERR "PM: Some devices failed to power down\n"); |
153 | goto Platform_finish; | 154 | goto Platform_finish; |
@@ -189,7 +190,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup) | |||
189 | if (suspend_ops->wake) | 190 | if (suspend_ops->wake) |
190 | suspend_ops->wake(); | 191 | suspend_ops->wake(); |
191 | 192 | ||
192 | dpm_resume_noirq(PMSG_RESUME); | 193 | dpm_resume_start(PMSG_RESUME); |
193 | 194 | ||
194 | Platform_finish: | 195 | Platform_finish: |
195 | if (suspend_ops->finish) | 196 | if (suspend_ops->finish) |
@@ -199,9 +200,8 @@ static int suspend_enter(suspend_state_t state, bool *wakeup) | |||
199 | } | 200 | } |
200 | 201 | ||
201 | /** | 202 | /** |
202 | * suspend_devices_and_enter - suspend devices and enter the desired system | 203 | * suspend_devices_and_enter - Suspend devices and enter system sleep state. |
203 | * sleep state. | 204 | * @state: System sleep state to enter. |
204 | * @state: state to enter | ||
205 | */ | 205 | */ |
206 | int suspend_devices_and_enter(suspend_state_t state) | 206 | int suspend_devices_and_enter(suspend_state_t state) |
207 | { | 207 | { |
@@ -251,10 +251,10 @@ int suspend_devices_and_enter(suspend_state_t state) | |||
251 | } | 251 | } |
252 | 252 | ||
253 | /** | 253 | /** |
254 | * suspend_finish - Do final work before exiting suspend sequence. | 254 | * suspend_finish - Clean up before finishing the suspend sequence. |
255 | * | 255 | * |
256 | * Call platform code to clean up, restart processes, and free the | 256 | * Call platform code to clean up, restart processes, and free the console that |
257 | * console that we've allocated. This is not called for suspend-to-disk. | 257 | * we've allocated. This routine is not called for hibernation. |
258 | */ | 258 | */ |
259 | static void suspend_finish(void) | 259 | static void suspend_finish(void) |
260 | { | 260 | { |
@@ -265,16 +265,14 @@ static void suspend_finish(void) | |||
265 | } | 265 | } |
266 | 266 | ||
267 | /** | 267 | /** |
268 | * enter_state - Do common work of entering low-power state. | 268 | * enter_state - Do common work needed to enter system sleep state. |
269 | * @state: pm_state structure for state we're entering. | 269 | * @state: System sleep state to enter. |
270 | * | 270 | * |
271 | * Make sure we're the only ones trying to enter a sleep state. Fail | 271 | * Make sure that no one else is trying to put the system into a sleep state. |
272 | * if someone has beat us to it, since we don't want anything weird to | 272 | * Fail if that's not the case. Otherwise, prepare for system suspend, make the |
273 | * happen when we wake up. | 273 | * system enter the given sleep state and clean up after wakeup. |
274 | * Then, do the setup for suspend, enter the state, and cleaup (after | ||
275 | * we've woken up). | ||
276 | */ | 274 | */ |
277 | int enter_state(suspend_state_t state) | 275 | static int enter_state(suspend_state_t state) |
278 | { | 276 | { |
279 | int error; | 277 | int error; |
280 | 278 | ||
@@ -310,24 +308,26 @@ int enter_state(suspend_state_t state) | |||
310 | } | 308 | } |
311 | 309 | ||
312 | /** | 310 | /** |
313 | * pm_suspend - Externally visible function for suspending system. | 311 | * pm_suspend - Externally visible function for suspending the system. |
314 | * @state: Enumerated value of state to enter. | 312 | * @state: System sleep state to enter. |
315 | * | 313 | * |
316 | * Determine whether or not value is within range, get state | 314 | * Check if the value of @state represents one of the supported states, |
317 | * structure, and enter (above). | 315 | * execute enter_state() and update system suspend statistics. |
318 | */ | 316 | */ |
319 | int pm_suspend(suspend_state_t state) | 317 | int pm_suspend(suspend_state_t state) |
320 | { | 318 | { |
321 | int ret; | 319 | int error; |
322 | if (state > PM_SUSPEND_ON && state < PM_SUSPEND_MAX) { | 320 | |
323 | ret = enter_state(state); | 321 | if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX) |
324 | if (ret) { | 322 | return -EINVAL; |
325 | suspend_stats.fail++; | 323 | |
326 | dpm_save_failed_errno(ret); | 324 | error = enter_state(state); |
327 | } else | 325 | if (error) { |
328 | suspend_stats.success++; | 326 | suspend_stats.fail++; |
329 | return ret; | 327 | dpm_save_failed_errno(error); |
328 | } else { | ||
329 | suspend_stats.success++; | ||
330 | } | 330 | } |
331 | return -EINVAL; | 331 | return error; |
332 | } | 332 | } |
333 | EXPORT_SYMBOL(pm_suspend); | 333 | EXPORT_SYMBOL(pm_suspend); |
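With the statistics bookkeeping folded into pm_suspend() itself, in-kernel callers (including the reworked state_store() above) no longer duplicate it and only need to check the return value. An illustrative, hypothetical caller:

#include <linux/printk.h>
#include <linux/suspend.h>

/* Hypothetical helper: put the system into suspend-to-RAM. */
static int foo_enter_mem_sleep(void)
{
	int error = pm_suspend(PM_SUSPEND_MEM);

	if (error)
		pr_err("foo: suspend-to-RAM failed: %d\n", error);
	return error;
}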
diff --git a/kernel/power/user.c b/kernel/power/user.c index 3e100075b13c..33c4329205af 100644 --- a/kernel/power/user.c +++ b/kernel/power/user.c | |||
@@ -249,16 +249,10 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd, | |||
249 | } | 249 | } |
250 | pm_restore_gfp_mask(); | 250 | pm_restore_gfp_mask(); |
251 | error = hibernation_snapshot(data->platform_support); | 251 | error = hibernation_snapshot(data->platform_support); |
252 | if (error) { | 252 | if (!error) { |
253 | thaw_kernel_threads(); | ||
254 | } else { | ||
255 | error = put_user(in_suspend, (int __user *)arg); | 253 | error = put_user(in_suspend, (int __user *)arg); |
256 | if (!error && !freezer_test_done) | 254 | data->ready = !freezer_test_done && !error; |
257 | data->ready = 1; | 255 | freezer_test_done = false; |
258 | if (freezer_test_done) { | ||
259 | freezer_test_done = false; | ||
260 | thaw_kernel_threads(); | ||
261 | } | ||
262 | } | 256 | } |
263 | break; | 257 | break; |
264 | 258 | ||