-rw-r--r--  Documentation/power/devices.txt   |  93
-rw-r--r--  arch/x86/kernel/apm_32.c          |  11
-rw-r--r--  drivers/base/power/generic_ops.c  | 157
-rw-r--r--  drivers/base/power/main.c         | 247
-rw-r--r--  drivers/xen/manage.c              |   6
-rw-r--r--  include/linux/pm.h                |  49
-rw-r--r--  include/linux/suspend.h           |   4
-rw-r--r--  kernel/kexec.c                    |   8
-rw-r--r--  kernel/power/hibernate.c          |  24
-rw-r--r--  kernel/power/main.c               |   8
-rw-r--r--  kernel/power/suspend.c            |   4
11 files changed, 467 insertions(+), 144 deletions(-)
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
index 20af7def23c8..872815cd41d3 100644
--- a/Documentation/power/devices.txt
+++ b/Documentation/power/devices.txt
@@ -96,6 +96,12 @@ struct dev_pm_ops {
96 int (*thaw)(struct device *dev); 96 int (*thaw)(struct device *dev);
97 int (*poweroff)(struct device *dev); 97 int (*poweroff)(struct device *dev);
98 int (*restore)(struct device *dev); 98 int (*restore)(struct device *dev);
99 int (*suspend_late)(struct device *dev);
100 int (*resume_early)(struct device *dev);
101 int (*freeze_late)(struct device *dev);
102 int (*thaw_early)(struct device *dev);
103 int (*poweroff_late)(struct device *dev);
104 int (*restore_early)(struct device *dev);
99 int (*suspend_noirq)(struct device *dev); 105 int (*suspend_noirq)(struct device *dev);
100 int (*resume_noirq)(struct device *dev); 106 int (*resume_noirq)(struct device *dev);
101 int (*freeze_noirq)(struct device *dev); 107 int (*freeze_noirq)(struct device *dev);
@@ -305,7 +311,7 @@ Entering System Suspend
305----------------------- 311-----------------------
306When the system goes into the standby or memory sleep state, the phases are: 312When the system goes into the standby or memory sleep state, the phases are:
307 313
308 prepare, suspend, suspend_noirq. 314 prepare, suspend, suspend_late, suspend_noirq.
309 315
310 1. The prepare phase is meant to prevent races by preventing new devices 316 1. The prepare phase is meant to prevent races by preventing new devices
311 from being registered; the PM core would never know that all the 317 from being registered; the PM core would never know that all the
@@ -324,7 +330,12 @@ When the system goes into the standby or memory sleep state, the phases are:
324 appropriate low-power state, depending on the bus type the device is on, 330 appropriate low-power state, depending on the bus type the device is on,
325 and they may enable wakeup events. 331 and they may enable wakeup events.
326 332
327 3. The suspend_noirq phase occurs after IRQ handlers have been disabled, 333 3. For a number of devices it is convenient to split suspend into the
334 "quiesce device" and "save device state" phases, in which cases
335 suspend_late is meant to do the latter. It is always executed after
336 runtime power management has been disabled for all devices.
337
338 4. The suspend_noirq phase occurs after IRQ handlers have been disabled,
328 which means that the driver's interrupt handler will not be called while 339 which means that the driver's interrupt handler will not be called while
329 the callback method is running. The methods should save the values of 340 the callback method is running. The methods should save the values of
330 the device's registers that weren't saved previously and finally put the 341 the device's registers that weren't saved previously and finally put the
@@ -359,7 +370,7 @@ Leaving System Suspend
359---------------------- 370----------------------
360When resuming from standby or memory sleep, the phases are: 371When resuming from standby or memory sleep, the phases are:
361 372
362 resume_noirq, resume, complete. 373 resume_noirq, resume_early, resume, complete.
363 374
364 1. The resume_noirq callback methods should perform any actions needed 375 1. The resume_noirq callback methods should perform any actions needed
365 before the driver's interrupt handlers are invoked. This generally 376 before the driver's interrupt handlers are invoked. This generally
@@ -375,14 +386,18 @@ When resuming from standby or memory sleep, the phases are:
375 device driver's ->pm.resume_noirq() method to perform device-specific 386 device driver's ->pm.resume_noirq() method to perform device-specific
376 actions. 387 actions.
377 388
378 2. The resume methods should bring the device back to its operating 389 2. The resume_early methods should prepare devices for the execution of
390 the resume methods. This generally involves undoing the actions of the
391 preceding suspend_late phase.
392
393 3. The resume methods should bring the device back to its operating
379 state, so that it can perform normal I/O. This generally involves 394 state, so that it can perform normal I/O. This generally involves
380 undoing the actions of the suspend phase. 395 undoing the actions of the suspend phase.
381 396
382 3. The complete phase uses only a bus callback. The method should undo the 397 4. The complete phase should undo the actions of the prepare phase. Note,
383 actions of the prepare phase. Note, however, that new children may be 398 however, that new children may be registered below the device as soon as
384 registered below the device as soon as the resume callbacks occur; it's 399 the resume callbacks occur; it's not necessary to wait until the
385 not necessary to wait until the complete phase. 400 complete phase.
386 401
387At the end of these phases, drivers should be as functional as they were before 402At the end of these phases, drivers should be as functional as they were before
388suspending: I/O can be performed using DMA and IRQs, and the relevant clocks are 403suspending: I/O can be performed using DMA and IRQs, and the relevant clocks are
@@ -429,8 +444,8 @@ an image of the system memory while everything is stable, reactivate all
429devices (thaw), write the image to permanent storage, and finally shut down the 444devices (thaw), write the image to permanent storage, and finally shut down the
430system (poweroff). The phases used to accomplish this are: 445system (poweroff). The phases used to accomplish this are:
431 446
432 prepare, freeze, freeze_noirq, thaw_noirq, thaw, complete, 447 prepare, freeze, freeze_late, freeze_noirq, thaw_noirq, thaw_early,
433 prepare, poweroff, poweroff_noirq 448 thaw, complete, prepare, poweroff, poweroff_late, poweroff_noirq
434 449
435 1. The prepare phase is discussed in the "Entering System Suspend" section 450 1. The prepare phase is discussed in the "Entering System Suspend" section
436 above. 451 above.
@@ -441,7 +456,11 @@ system (poweroff). The phases used to accomplish this are:
441 save time it's best not to do so. Also, the device should not be 456 save time it's best not to do so. Also, the device should not be
442 prepared to generate wakeup events. 457 prepared to generate wakeup events.
443 458
444 3. The freeze_noirq phase is analogous to the suspend_noirq phase discussed 459 3. The freeze_late phase is analogous to the suspend_late phase described
460 above, except that the device should not be put in a low-power state and
461 should not be allowed to generate wakeup events.
462
463 4. The freeze_noirq phase is analogous to the suspend_noirq phase discussed
445 above, except again that the device should not be put in a low-power 464 above, except again that the device should not be put in a low-power
446 state and should not be allowed to generate wakeup events. 465 state and should not be allowed to generate wakeup events.
447 466
@@ -449,15 +468,19 @@ At this point the system image is created. All devices should be inactive and
449the contents of memory should remain undisturbed while this happens, so that the 468the contents of memory should remain undisturbed while this happens, so that the
450image forms an atomic snapshot of the system state. 469image forms an atomic snapshot of the system state.
451 470
452 4. The thaw_noirq phase is analogous to the resume_noirq phase discussed 471 5. The thaw_noirq phase is analogous to the resume_noirq phase discussed
453 above. The main difference is that its methods can assume the device is 472 above. The main difference is that its methods can assume the device is
454 in the same state as at the end of the freeze_noirq phase. 473 in the same state as at the end of the freeze_noirq phase.
455 474
456 5. The thaw phase is analogous to the resume phase discussed above. Its 475 6. The thaw_early phase is analogous to the resume_early phase described
476 above. Its methods should undo the actions of the preceding
477 freeze_late, if necessary.
478
479 7. The thaw phase is analogous to the resume phase discussed above. Its
457 methods should bring the device back to an operating state, so that it 480 methods should bring the device back to an operating state, so that it
458 can be used for saving the image if necessary. 481 can be used for saving the image if necessary.
459 482
460 6. The complete phase is discussed in the "Leaving System Suspend" section 483 8. The complete phase is discussed in the "Leaving System Suspend" section
461 above. 484 above.
462 485
463At this point the system image is saved, and the devices then need to be 486At this point the system image is saved, and the devices then need to be
@@ -465,16 +488,19 @@ prepared for the upcoming system shutdown. This is much like suspending them
465before putting the system into the standby or memory sleep state, and the phases 488before putting the system into the standby or memory sleep state, and the phases
466are similar. 489are similar.
467 490
468 7. The prepare phase is discussed above. 491 9. The prepare phase is discussed above.
492
493 10. The poweroff phase is analogous to the suspend phase.
469 494
470 8. The poweroff phase is analogous to the suspend phase. 495 11. The poweroff_late phase is analogous to the suspend_late phase.
471 496
472 9. The poweroff_noirq phase is analogous to the suspend_noirq phase. 497 12. The poweroff_noirq phase is analogous to the suspend_noirq phase.
473 498
474The poweroff and poweroff_noirq callbacks should do essentially the same things 499The poweroff, poweroff_late and poweroff_noirq callbacks should do essentially
475as the suspend and suspend_noirq callbacks. The only notable difference is that 500the same things as the suspend, suspend_late and suspend_noirq callbacks,
476they need not store the device register values, because the registers should 501respectively. The only notable difference is that they need not store the
477already have been stored during the freeze or freeze_noirq phases. 502device register values, because the registers should already have been stored
503during the freeze, freeze_late or freeze_noirq phases.
478 504
479 505
480Leaving Hibernation 506Leaving Hibernation
@@ -518,22 +544,25 @@ To achieve this, the image kernel must restore the devices' pre-hibernation
518functionality. The operation is much like waking up from the memory sleep 544functionality. The operation is much like waking up from the memory sleep
519state, although it involves different phases: 545state, although it involves different phases:
520 546
521 restore_noirq, restore, complete 547 restore_noirq, restore_early, restore, complete
522 548
523 1. The restore_noirq phase is analogous to the resume_noirq phase. 549 1. The restore_noirq phase is analogous to the resume_noirq phase.
524 550
525 2. The restore phase is analogous to the resume phase. 551 2. The restore_early phase is analogous to the resume_early phase.
552
553 3. The restore phase is analogous to the resume phase.
526 554
527 3. The complete phase is discussed above. 555 4. The complete phase is discussed above.
528 556
529The main difference from resume[_noirq] is that restore[_noirq] must assume the 557The main difference from resume[_early|_noirq] is that restore[_early|_noirq]
530device has been accessed and reconfigured by the boot loader or the boot kernel. 558must assume the device has been accessed and reconfigured by the boot loader or
531Consequently the state of the device may be different from the state remembered 559the boot kernel. Consequently the state of the device may be different from the
532from the freeze and freeze_noirq phases. The device may even need to be reset 560state remembered from the freeze, freeze_late and freeze_noirq phases. The
533and completely re-initialized. In many cases this difference doesn't matter, so 561device may even need to be reset and completely re-initialized. In many cases
534the resume[_noirq] and restore[_norq] method pointers can be set to the same 562this difference doesn't matter, so the resume[_early|_noirq] and
535routines. Nevertheless, different callback pointers are used in case there is a 563restore[_early|_norq] method pointers can be set to the same routines.
536situation where it actually matters. 564Nevertheless, different callback pointers are used in case there is a situation
565where it actually does matter.
537 566
538 567
539Device Power Management Domains 568Device Power Management Domains
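
The documentation above describes the new phases, but the patch itself adds no driver that uses them. Below is a minimal, illustrative driver-side sketch of the split into "quiesce device" (->suspend) and "save device state" (->suspend_late), with the mirror-image early/late resume split; everything named foo_*, including the FOO_CTRL register layout, is hypothetical. The hibernation callbacks reuse the same routines, which the text above says is acceptable when the difference does not matter.

/*
 * Illustrative sketch only, not part of the patch.  "foo", its register
 * layout and the FOO_* constants are hypothetical.
 */
#include <linux/device.h>
#include <linux/io.h>
#include <linux/pm.h>

#define FOO_CTRL	0x00	/* hypothetical control register */
#define FOO_CTRL_RUN	0x01	/* hypothetical "accept new I/O" bit */

struct foo_device {
	void __iomem *base;
	u32 saved_ctrl;
};

/* "Quiesce device": stop accepting new I/O while IRQs are still enabled. */
static int foo_suspend(struct device *dev)
{
	struct foo_device *foo = dev_get_drvdata(dev);

	writel(readl(foo->base + FOO_CTRL) & ~FOO_CTRL_RUN,
	       foo->base + FOO_CTRL);
	return 0;
}

/* "Save device state": runtime PM has already been disabled at this point. */
static int foo_suspend_late(struct device *dev)
{
	struct foo_device *foo = dev_get_drvdata(dev);

	foo->saved_ctrl = readl(foo->base + FOO_CTRL);
	return 0;
}

/* Undo foo_suspend_late() before the resume phase proper. */
static int foo_resume_early(struct device *dev)
{
	struct foo_device *foo = dev_get_drvdata(dev);

	writel(foo->saved_ctrl, foo->base + FOO_CTRL);
	return 0;
}

/* Bring the device back to a fully operational state. */
static int foo_resume(struct device *dev)
{
	struct foo_device *foo = dev_get_drvdata(dev);

	writel(readl(foo->base + FOO_CTRL) | FOO_CTRL_RUN,
	       foo->base + FOO_CTRL);
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend	= foo_suspend,
	.suspend_late	= foo_suspend_late,
	.resume_early	= foo_resume_early,
	.resume		= foo_resume,
	/* The hibernation phases are analogous, so reuse the same routines. */
	.freeze		= foo_suspend,
	.freeze_late	= foo_suspend_late,
	.thaw_early	= foo_resume_early,
	.thaw		= foo_resume,
	.poweroff	= foo_suspend,
	.poweroff_late	= foo_suspend_late,
	.restore_early	= foo_resume_early,
	.restore	= foo_resume,
};
/* The driver would expose this via its struct device_driver, .pm = &foo_pm_ops. */
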
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index f76623cbe263..5d56931a15b3 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -1234,8 +1234,7 @@ static int suspend(int vetoable)
1234 struct apm_user *as; 1234 struct apm_user *as;
1235 1235
1236 dpm_suspend_start(PMSG_SUSPEND); 1236 dpm_suspend_start(PMSG_SUSPEND);
1237 1237 dpm_suspend_end(PMSG_SUSPEND);
1238 dpm_suspend_noirq(PMSG_SUSPEND);
1239 1238
1240 local_irq_disable(); 1239 local_irq_disable();
1241 syscore_suspend(); 1240 syscore_suspend();
@@ -1259,9 +1258,9 @@ static int suspend(int vetoable)
1259 syscore_resume(); 1258 syscore_resume();
1260 local_irq_enable(); 1259 local_irq_enable();
1261 1260
1262 dpm_resume_noirq(PMSG_RESUME); 1261 dpm_resume_start(PMSG_RESUME);
1263
1264 dpm_resume_end(PMSG_RESUME); 1262 dpm_resume_end(PMSG_RESUME);
1263
1265 queue_event(APM_NORMAL_RESUME, NULL); 1264 queue_event(APM_NORMAL_RESUME, NULL);
1266 spin_lock(&user_list_lock); 1265 spin_lock(&user_list_lock);
1267 for (as = user_list; as != NULL; as = as->next) { 1266 for (as = user_list; as != NULL; as = as->next) {
@@ -1277,7 +1276,7 @@ static void standby(void)
1277{ 1276{
1278 int err; 1277 int err;
1279 1278
1280 dpm_suspend_noirq(PMSG_SUSPEND); 1279 dpm_suspend_end(PMSG_SUSPEND);
1281 1280
1282 local_irq_disable(); 1281 local_irq_disable();
1283 syscore_suspend(); 1282 syscore_suspend();
@@ -1291,7 +1290,7 @@ static void standby(void)
1291 syscore_resume(); 1290 syscore_resume();
1292 local_irq_enable(); 1291 local_irq_enable();
1293 1292
1294 dpm_resume_noirq(PMSG_RESUME); 1293 dpm_resume_start(PMSG_RESUME);
1295} 1294}
1296 1295
1297static apm_event_t get_event(void) 1296static apm_event_t get_event(void)
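
Taken together, the apm_32.c hunks above show the pairing the callers follow after this patch: dpm_suspend_start()/dpm_suspend_end() on the way down and dpm_resume_start()/dpm_resume_end() on the way up. A rough sketch of that ordering, assembled from the callers changed in this patch (error handling trimmed; not code from the patch):

#include <linux/interrupt.h>
#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>

/* Sketch of the system-wide ordering used by the callers above. */
static int example_enter_sleep(void)
{
	int error;

	error = dpm_suspend_start(PMSG_SUSPEND);	/* prepare + suspend */
	if (error)
		goto resume_devices;

	error = dpm_suspend_end(PMSG_SUSPEND);		/* suspend_late + suspend_noirq */
	if (error)
		goto resume_devices;	/* dpm_suspend_end() unwinds itself on failure */

	local_irq_disable();
	syscore_suspend();

	/* ... the platform enters the sleep state and later wakes up ... */

	syscore_resume();
	local_irq_enable();

	dpm_resume_start(PMSG_RESUME);			/* resume_noirq + resume_early */

resume_devices:
	dpm_resume_end(PMSG_RESUME);			/* resume + complete */
	return error;
}
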
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 10bdd793f0bd..d03d290f31c2 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -92,59 +92,28 @@ int pm_generic_prepare(struct device *dev)
92} 92}
93 93
94/** 94/**
95 * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback. 95 * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems.
96 * @dev: Device to handle. 96 * @dev: Device to suspend.
97 * @event: PM transition of the system under way.
98 * @bool: Whether or not this is the "noirq" stage.
99 *
100 * Execute the PM callback corresponding to @event provided by the driver of
101 * @dev, if defined, and return its error code. Return 0 if the callback is
102 * not present.
103 */ 97 */
104static int __pm_generic_call(struct device *dev, int event, bool noirq) 98int pm_generic_suspend_noirq(struct device *dev)
105{ 99{
106 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 100 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
107 int (*callback)(struct device *);
108
109 if (!pm)
110 return 0;
111
112 switch (event) {
113 case PM_EVENT_SUSPEND:
114 callback = noirq ? pm->suspend_noirq : pm->suspend;
115 break;
116 case PM_EVENT_FREEZE:
117 callback = noirq ? pm->freeze_noirq : pm->freeze;
118 break;
119 case PM_EVENT_HIBERNATE:
120 callback = noirq ? pm->poweroff_noirq : pm->poweroff;
121 break;
122 case PM_EVENT_RESUME:
123 callback = noirq ? pm->resume_noirq : pm->resume;
124 break;
125 case PM_EVENT_THAW:
126 callback = noirq ? pm->thaw_noirq : pm->thaw;
127 break;
128 case PM_EVENT_RESTORE:
129 callback = noirq ? pm->restore_noirq : pm->restore;
130 break;
131 default:
132 callback = NULL;
133 break;
134 }
135 101
136 return callback ? callback(dev) : 0; 102 return pm && pm->suspend_noirq ? pm->suspend_noirq(dev) : 0;
137} 103}
104EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
138 105
139/** 106/**
140 * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems. 107 * pm_generic_suspend_late - Generic suspend_late callback for subsystems.
141 * @dev: Device to suspend. 108 * @dev: Device to suspend.
142 */ 109 */
143int pm_generic_suspend_noirq(struct device *dev) 110int pm_generic_suspend_late(struct device *dev)
144{ 111{
145 return __pm_generic_call(dev, PM_EVENT_SUSPEND, true); 112 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
113
114 return pm && pm->suspend_late ? pm->suspend_late(dev) : 0;
146} 115}
147EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq); 116EXPORT_SYMBOL_GPL(pm_generic_suspend_late);
148 117
149/** 118/**
150 * pm_generic_suspend - Generic suspend callback for subsystems. 119 * pm_generic_suspend - Generic suspend callback for subsystems.
@@ -152,7 +121,9 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
152 */ 121 */
153int pm_generic_suspend(struct device *dev) 122int pm_generic_suspend(struct device *dev)
154{ 123{
155 return __pm_generic_call(dev, PM_EVENT_SUSPEND, false); 124 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
125
126 return pm && pm->suspend ? pm->suspend(dev) : 0;
156} 127}
157EXPORT_SYMBOL_GPL(pm_generic_suspend); 128EXPORT_SYMBOL_GPL(pm_generic_suspend);
158 129
@@ -162,17 +133,33 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend);
162 */ 133 */
163int pm_generic_freeze_noirq(struct device *dev) 134int pm_generic_freeze_noirq(struct device *dev)
164{ 135{
165 return __pm_generic_call(dev, PM_EVENT_FREEZE, true); 136 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
137
138 return pm && pm->freeze_noirq ? pm->freeze_noirq(dev) : 0;
166} 139}
167EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq); 140EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
168 141
169/** 142/**
143 * pm_generic_freeze_late - Generic freeze_late callback for subsystems.
144 * @dev: Device to freeze.
145 */
146int pm_generic_freeze_late(struct device *dev)
147{
148 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
149
150 return pm && pm->freeze_late ? pm->freeze_late(dev) : 0;
151}
152EXPORT_SYMBOL_GPL(pm_generic_freeze_late);
153
154/**
170 * pm_generic_freeze - Generic freeze callback for subsystems. 155 * pm_generic_freeze - Generic freeze callback for subsystems.
171 * @dev: Device to freeze. 156 * @dev: Device to freeze.
172 */ 157 */
173int pm_generic_freeze(struct device *dev) 158int pm_generic_freeze(struct device *dev)
174{ 159{
175 return __pm_generic_call(dev, PM_EVENT_FREEZE, false); 160 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
161
162 return pm && pm->freeze ? pm->freeze(dev) : 0;
176} 163}
177EXPORT_SYMBOL_GPL(pm_generic_freeze); 164EXPORT_SYMBOL_GPL(pm_generic_freeze);
178 165
@@ -182,17 +169,33 @@ EXPORT_SYMBOL_GPL(pm_generic_freeze);
182 */ 169 */
183int pm_generic_poweroff_noirq(struct device *dev) 170int pm_generic_poweroff_noirq(struct device *dev)
184{ 171{
185 return __pm_generic_call(dev, PM_EVENT_HIBERNATE, true); 172 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
173
174 return pm && pm->poweroff_noirq ? pm->poweroff_noirq(dev) : 0;
186} 175}
187EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq); 176EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);
188 177
189/** 178/**
179 * pm_generic_poweroff_late - Generic poweroff_late callback for subsystems.
180 * @dev: Device to handle.
181 */
182int pm_generic_poweroff_late(struct device *dev)
183{
184 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
185
186 return pm && pm->poweroff_late ? pm->poweroff_late(dev) : 0;
187}
188EXPORT_SYMBOL_GPL(pm_generic_poweroff_late);
189
190/**
190 * pm_generic_poweroff - Generic poweroff callback for subsystems. 191 * pm_generic_poweroff - Generic poweroff callback for subsystems.
191 * @dev: Device to handle. 192 * @dev: Device to handle.
192 */ 193 */
193int pm_generic_poweroff(struct device *dev) 194int pm_generic_poweroff(struct device *dev)
194{ 195{
195 return __pm_generic_call(dev, PM_EVENT_HIBERNATE, false); 196 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
197
198 return pm && pm->poweroff ? pm->poweroff(dev) : 0;
196} 199}
197EXPORT_SYMBOL_GPL(pm_generic_poweroff); 200EXPORT_SYMBOL_GPL(pm_generic_poweroff);
198 201
@@ -202,17 +205,33 @@ EXPORT_SYMBOL_GPL(pm_generic_poweroff);
202 */ 205 */
203int pm_generic_thaw_noirq(struct device *dev) 206int pm_generic_thaw_noirq(struct device *dev)
204{ 207{
205 return __pm_generic_call(dev, PM_EVENT_THAW, true); 208 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
209
210 return pm && pm->thaw_noirq ? pm->thaw_noirq(dev) : 0;
206} 211}
207EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq); 212EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
208 213
209/** 214/**
215 * pm_generic_thaw_early - Generic thaw_early callback for subsystems.
216 * @dev: Device to thaw.
217 */
218int pm_generic_thaw_early(struct device *dev)
219{
220 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
221
222 return pm && pm->thaw_early ? pm->thaw_early(dev) : 0;
223}
224EXPORT_SYMBOL_GPL(pm_generic_thaw_early);
225
226/**
210 * pm_generic_thaw - Generic thaw callback for subsystems. 227 * pm_generic_thaw - Generic thaw callback for subsystems.
211 * @dev: Device to thaw. 228 * @dev: Device to thaw.
212 */ 229 */
213int pm_generic_thaw(struct device *dev) 230int pm_generic_thaw(struct device *dev)
214{ 231{
215 return __pm_generic_call(dev, PM_EVENT_THAW, false); 232 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
233
234 return pm && pm->thaw ? pm->thaw(dev) : 0;
216} 235}
217EXPORT_SYMBOL_GPL(pm_generic_thaw); 236EXPORT_SYMBOL_GPL(pm_generic_thaw);
218 237
@@ -222,17 +241,33 @@ EXPORT_SYMBOL_GPL(pm_generic_thaw);
222 */ 241 */
223int pm_generic_resume_noirq(struct device *dev) 242int pm_generic_resume_noirq(struct device *dev)
224{ 243{
225 return __pm_generic_call(dev, PM_EVENT_RESUME, true); 244 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
245
246 return pm && pm->resume_noirq ? pm->resume_noirq(dev) : 0;
226} 247}
227EXPORT_SYMBOL_GPL(pm_generic_resume_noirq); 248EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
228 249
229/** 250/**
251 * pm_generic_resume_early - Generic resume_early callback for subsystems.
252 * @dev: Device to resume.
253 */
254int pm_generic_resume_early(struct device *dev)
255{
256 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
257
258 return pm && pm->resume_early ? pm->resume_early(dev) : 0;
259}
260EXPORT_SYMBOL_GPL(pm_generic_resume_early);
261
262/**
230 * pm_generic_resume - Generic resume callback for subsystems. 263 * pm_generic_resume - Generic resume callback for subsystems.
231 * @dev: Device to resume. 264 * @dev: Device to resume.
232 */ 265 */
233int pm_generic_resume(struct device *dev) 266int pm_generic_resume(struct device *dev)
234{ 267{
235 return __pm_generic_call(dev, PM_EVENT_RESUME, false); 268 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
269
270 return pm && pm->resume ? pm->resume(dev) : 0;
236} 271}
237EXPORT_SYMBOL_GPL(pm_generic_resume); 272EXPORT_SYMBOL_GPL(pm_generic_resume);
238 273
@@ -242,17 +277,33 @@ EXPORT_SYMBOL_GPL(pm_generic_resume);
242 */ 277 */
243int pm_generic_restore_noirq(struct device *dev) 278int pm_generic_restore_noirq(struct device *dev)
244{ 279{
245 return __pm_generic_call(dev, PM_EVENT_RESTORE, true); 280 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
281
282 return pm && pm->restore_noirq ? pm->restore_noirq(dev) : 0;
246} 283}
247EXPORT_SYMBOL_GPL(pm_generic_restore_noirq); 284EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
248 285
249/** 286/**
287 * pm_generic_restore_early - Generic restore_early callback for subsystems.
288 * @dev: Device to resume.
289 */
290int pm_generic_restore_early(struct device *dev)
291{
292 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
293
294 return pm && pm->restore_early ? pm->restore_early(dev) : 0;
295}
296EXPORT_SYMBOL_GPL(pm_generic_restore_early);
297
298/**
250 * pm_generic_restore - Generic restore callback for subsystems. 299 * pm_generic_restore - Generic restore callback for subsystems.
251 * @dev: Device to restore. 300 * @dev: Device to restore.
252 */ 301 */
253int pm_generic_restore(struct device *dev) 302int pm_generic_restore(struct device *dev)
254{ 303{
255 return __pm_generic_call(dev, PM_EVENT_RESTORE, false); 304 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
305
306 return pm && pm->restore ? pm->restore(dev) : 0;
256} 307}
257EXPORT_SYMBOL_GPL(pm_generic_restore); 308EXPORT_SYMBOL_GPL(pm_generic_restore);
258 309
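
The new pm_generic_*_late/_early helpers above simply forward to the corresponding callback in the driver's dev_pm_ops, returning 0 when the driver does not provide one. A subsystem that wants drivers' late/early callbacks honoured can therefore plug the helpers straight into its own dev_pm_ops; a hypothetical bus doing exactly that might look like this (foo_bus is not a real subsystem):

#include <linux/device.h>
#include <linux/pm.h>

static const struct dev_pm_ops foo_bus_pm_ops = {
	.prepare	= pm_generic_prepare,
	.suspend	= pm_generic_suspend,
	.suspend_late	= pm_generic_suspend_late,
	.suspend_noirq	= pm_generic_suspend_noirq,
	.resume_noirq	= pm_generic_resume_noirq,
	.resume_early	= pm_generic_resume_early,
	.resume		= pm_generic_resume,
	.freeze		= pm_generic_freeze,
	.freeze_late	= pm_generic_freeze_late,
	.freeze_noirq	= pm_generic_freeze_noirq,
	.thaw_noirq	= pm_generic_thaw_noirq,
	.thaw_early	= pm_generic_thaw_early,
	.thaw		= pm_generic_thaw,
	.poweroff	= pm_generic_poweroff,
	.poweroff_late	= pm_generic_poweroff_late,
	.poweroff_noirq	= pm_generic_poweroff_noirq,
	.restore_noirq	= pm_generic_restore_noirq,
	.restore_early	= pm_generic_restore_early,
	.restore	= pm_generic_restore,
	.complete	= pm_generic_complete,
};

/* Hypothetical bus type; registered elsewhere with bus_register(). */
static struct bus_type foo_bus_type = {
	.name	= "foo",
	.pm	= &foo_bus_pm_ops,
};
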
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index e2cc3d2e0ecc..b462c0e341cb 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -47,6 +47,7 @@ typedef int (*pm_callback_t)(struct device *);
47LIST_HEAD(dpm_list); 47LIST_HEAD(dpm_list);
48LIST_HEAD(dpm_prepared_list); 48LIST_HEAD(dpm_prepared_list);
49LIST_HEAD(dpm_suspended_list); 49LIST_HEAD(dpm_suspended_list);
50LIST_HEAD(dpm_late_early_list);
50LIST_HEAD(dpm_noirq_list); 51LIST_HEAD(dpm_noirq_list);
51 52
52struct suspend_stats suspend_stats; 53struct suspend_stats suspend_stats;
@@ -246,6 +247,40 @@ static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
246} 247}
247 248
248/** 249/**
250 * pm_late_early_op - Return the PM operation appropriate for given PM event.
251 * @ops: PM operations to choose from.
252 * @state: PM transition of the system being carried out.
253 *
254 * Runtime PM is disabled for @dev while this function is being executed.
255 */
256static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
257 pm_message_t state)
258{
259 switch (state.event) {
260#ifdef CONFIG_SUSPEND
261 case PM_EVENT_SUSPEND:
262 return ops->suspend_late;
263 case PM_EVENT_RESUME:
264 return ops->resume_early;
265#endif /* CONFIG_SUSPEND */
266#ifdef CONFIG_HIBERNATE_CALLBACKS
267 case PM_EVENT_FREEZE:
268 case PM_EVENT_QUIESCE:
269 return ops->freeze_late;
270 case PM_EVENT_HIBERNATE:
271 return ops->poweroff_late;
272 case PM_EVENT_THAW:
273 case PM_EVENT_RECOVER:
274 return ops->thaw_early;
275 case PM_EVENT_RESTORE:
276 return ops->restore_early;
277#endif /* CONFIG_HIBERNATE_CALLBACKS */
278 }
279
280 return NULL;
281}
282
283/**
249 * pm_noirq_op - Return the PM operation appropriate for given PM event. 284 * pm_noirq_op - Return the PM operation appropriate for given PM event.
250 * @ops: PM operations to choose from. 285 * @ops: PM operations to choose from.
251 * @state: PM transition of the system being carried out. 286 * @state: PM transition of the system being carried out.
@@ -374,21 +409,21 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
374 TRACE_RESUME(0); 409 TRACE_RESUME(0);
375 410
376 if (dev->pm_domain) { 411 if (dev->pm_domain) {
377 info = "EARLY power domain "; 412 info = "noirq power domain ";
378 callback = pm_noirq_op(&dev->pm_domain->ops, state); 413 callback = pm_noirq_op(&dev->pm_domain->ops, state);
379 } else if (dev->type && dev->type->pm) { 414 } else if (dev->type && dev->type->pm) {
380 info = "EARLY type "; 415 info = "noirq type ";
381 callback = pm_noirq_op(dev->type->pm, state); 416 callback = pm_noirq_op(dev->type->pm, state);
382 } else if (dev->class && dev->class->pm) { 417 } else if (dev->class && dev->class->pm) {
383 info = "EARLY class "; 418 info = "noirq class ";
384 callback = pm_noirq_op(dev->class->pm, state); 419 callback = pm_noirq_op(dev->class->pm, state);
385 } else if (dev->bus && dev->bus->pm) { 420 } else if (dev->bus && dev->bus->pm) {
386 info = "EARLY bus "; 421 info = "noirq bus ";
387 callback = pm_noirq_op(dev->bus->pm, state); 422 callback = pm_noirq_op(dev->bus->pm, state);
388 } 423 }
389 424
390 if (!callback && dev->driver && dev->driver->pm) { 425 if (!callback && dev->driver && dev->driver->pm) {
391 info = "EARLY driver "; 426 info = "noirq driver ";
392 callback = pm_noirq_op(dev->driver->pm, state); 427 callback = pm_noirq_op(dev->driver->pm, state);
393 } 428 }
394 429
@@ -399,13 +434,13 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
399} 434}
400 435
401/** 436/**
402 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices. 437 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
403 * @state: PM transition of the system being carried out. 438 * @state: PM transition of the system being carried out.
404 * 439 *
405 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and 440 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
406 * enable device drivers to receive interrupts. 441 * enable device drivers to receive interrupts.
407 */ 442 */
408void dpm_resume_noirq(pm_message_t state) 443static void dpm_resume_noirq(pm_message_t state)
409{ 444{
410 ktime_t starttime = ktime_get(); 445 ktime_t starttime = ktime_get();
411 446
@@ -415,7 +450,7 @@ void dpm_resume_noirq(pm_message_t state)
415 int error; 450 int error;
416 451
417 get_device(dev); 452 get_device(dev);
418 list_move_tail(&dev->power.entry, &dpm_suspended_list); 453 list_move_tail(&dev->power.entry, &dpm_late_early_list);
419 mutex_unlock(&dpm_list_mtx); 454 mutex_unlock(&dpm_list_mtx);
420 455
421 error = device_resume_noirq(dev, state); 456 error = device_resume_noirq(dev, state);
@@ -423,6 +458,80 @@ void dpm_resume_noirq(pm_message_t state)
423 suspend_stats.failed_resume_noirq++; 458 suspend_stats.failed_resume_noirq++;
424 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ); 459 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
425 dpm_save_failed_dev(dev_name(dev)); 460 dpm_save_failed_dev(dev_name(dev));
461 pm_dev_err(dev, state, " noirq", error);
462 }
463
464 mutex_lock(&dpm_list_mtx);
465 put_device(dev);
466 }
467 mutex_unlock(&dpm_list_mtx);
468 dpm_show_time(starttime, state, "noirq");
469 resume_device_irqs();
470}
471
472/**
473 * device_resume_early - Execute an "early resume" callback for given device.
474 * @dev: Device to handle.
475 * @state: PM transition of the system being carried out.
476 *
477 * Runtime PM is disabled for @dev while this function is being executed.
478 */
479static int device_resume_early(struct device *dev, pm_message_t state)
480{
481 pm_callback_t callback = NULL;
482 char *info = NULL;
483 int error = 0;
484
485 TRACE_DEVICE(dev);
486 TRACE_RESUME(0);
487
488 if (dev->pm_domain) {
489 info = "early power domain ";
490 callback = pm_late_early_op(&dev->pm_domain->ops, state);
491 } else if (dev->type && dev->type->pm) {
492 info = "early type ";
493 callback = pm_late_early_op(dev->type->pm, state);
494 } else if (dev->class && dev->class->pm) {
495 info = "early class ";
496 callback = pm_late_early_op(dev->class->pm, state);
497 } else if (dev->bus && dev->bus->pm) {
498 info = "early bus ";
499 callback = pm_late_early_op(dev->bus->pm, state);
500 }
501
502 if (!callback && dev->driver && dev->driver->pm) {
503 info = "early driver ";
504 callback = pm_late_early_op(dev->driver->pm, state);
505 }
506
507 error = dpm_run_callback(callback, dev, state, info);
508
509 TRACE_RESUME(error);
510 return error;
511}
512
513/**
514 * dpm_resume_early - Execute "early resume" callbacks for all devices.
515 * @state: PM transition of the system being carried out.
516 */
517static void dpm_resume_early(pm_message_t state)
518{
519 ktime_t starttime = ktime_get();
520
521 mutex_lock(&dpm_list_mtx);
522 while (!list_empty(&dpm_late_early_list)) {
523 struct device *dev = to_device(dpm_late_early_list.next);
524 int error;
525
526 get_device(dev);
527 list_move_tail(&dev->power.entry, &dpm_suspended_list);
528 mutex_unlock(&dpm_list_mtx);
529
530 error = device_resume_early(dev, state);
531 if (error) {
532 suspend_stats.failed_resume_early++;
533 dpm_save_failed_step(SUSPEND_RESUME_EARLY);
534 dpm_save_failed_dev(dev_name(dev));
426 pm_dev_err(dev, state, " early", error); 535 pm_dev_err(dev, state, " early", error);
427 } 536 }
428 537
@@ -431,9 +540,18 @@ void dpm_resume_noirq(pm_message_t state)
431 } 540 }
432 mutex_unlock(&dpm_list_mtx); 541 mutex_unlock(&dpm_list_mtx);
433 dpm_show_time(starttime, state, "early"); 542 dpm_show_time(starttime, state, "early");
434 resume_device_irqs();
435} 543}
436EXPORT_SYMBOL_GPL(dpm_resume_noirq); 544
545/**
546 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
547 * @state: PM transition of the system being carried out.
548 */
549void dpm_resume_start(pm_message_t state)
550{
551 dpm_resume_noirq(state);
552 dpm_resume_early(state);
553}
554EXPORT_SYMBOL_GPL(dpm_resume_start);
437 555
438/** 556/**
439 * device_resume - Execute "resume" callbacks for given device. 557 * device_resume - Execute "resume" callbacks for given device.
@@ -716,21 +834,21 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
716 char *info = NULL; 834 char *info = NULL;
717 835
718 if (dev->pm_domain) { 836 if (dev->pm_domain) {
719 info = "LATE power domain "; 837 info = "noirq power domain ";
720 callback = pm_noirq_op(&dev->pm_domain->ops, state); 838 callback = pm_noirq_op(&dev->pm_domain->ops, state);
721 } else if (dev->type && dev->type->pm) { 839 } else if (dev->type && dev->type->pm) {
722 info = "LATE type "; 840 info = "noirq type ";
723 callback = pm_noirq_op(dev->type->pm, state); 841 callback = pm_noirq_op(dev->type->pm, state);
724 } else if (dev->class && dev->class->pm) { 842 } else if (dev->class && dev->class->pm) {
725 info = "LATE class "; 843 info = "noirq class ";
726 callback = pm_noirq_op(dev->class->pm, state); 844 callback = pm_noirq_op(dev->class->pm, state);
727 } else if (dev->bus && dev->bus->pm) { 845 } else if (dev->bus && dev->bus->pm) {
728 info = "LATE bus "; 846 info = "noirq bus ";
729 callback = pm_noirq_op(dev->bus->pm, state); 847 callback = pm_noirq_op(dev->bus->pm, state);
730 } 848 }
731 849
732 if (!callback && dev->driver && dev->driver->pm) { 850 if (!callback && dev->driver && dev->driver->pm) {
733 info = "LATE driver "; 851 info = "noirq driver ";
734 callback = pm_noirq_op(dev->driver->pm, state); 852 callback = pm_noirq_op(dev->driver->pm, state);
735 } 853 }
736 854
@@ -738,21 +856,21 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
738} 856}
739 857
740/** 858/**
741 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices. 859 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
742 * @state: PM transition of the system being carried out. 860 * @state: PM transition of the system being carried out.
743 * 861 *
744 * Prevent device drivers from receiving interrupts and call the "noirq" suspend 862 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
745 * handlers for all non-sysdev devices. 863 * handlers for all non-sysdev devices.
746 */ 864 */
747int dpm_suspend_noirq(pm_message_t state) 865static int dpm_suspend_noirq(pm_message_t state)
748{ 866{
749 ktime_t starttime = ktime_get(); 867 ktime_t starttime = ktime_get();
750 int error = 0; 868 int error = 0;
751 869
752 suspend_device_irqs(); 870 suspend_device_irqs();
753 mutex_lock(&dpm_list_mtx); 871 mutex_lock(&dpm_list_mtx);
754 while (!list_empty(&dpm_suspended_list)) { 872 while (!list_empty(&dpm_late_early_list)) {
755 struct device *dev = to_device(dpm_suspended_list.prev); 873 struct device *dev = to_device(dpm_late_early_list.prev);
756 874
757 get_device(dev); 875 get_device(dev);
758 mutex_unlock(&dpm_list_mtx); 876 mutex_unlock(&dpm_list_mtx);
@@ -761,7 +879,7 @@ int dpm_suspend_noirq(pm_message_t state)
761 879
762 mutex_lock(&dpm_list_mtx); 880 mutex_lock(&dpm_list_mtx);
763 if (error) { 881 if (error) {
764 pm_dev_err(dev, state, " late", error); 882 pm_dev_err(dev, state, " noirq", error);
765 suspend_stats.failed_suspend_noirq++; 883 suspend_stats.failed_suspend_noirq++;
766 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ); 884 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
767 dpm_save_failed_dev(dev_name(dev)); 885 dpm_save_failed_dev(dev_name(dev));
@@ -776,10 +894,95 @@ int dpm_suspend_noirq(pm_message_t state)
776 if (error) 894 if (error)
777 dpm_resume_noirq(resume_event(state)); 895 dpm_resume_noirq(resume_event(state));
778 else 896 else
897 dpm_show_time(starttime, state, "noirq");
898 return error;
899}
900
901/**
902 * device_suspend_late - Execute a "late suspend" callback for given device.
903 * @dev: Device to handle.
904 * @state: PM transition of the system being carried out.
905 *
906 * Runtime PM is disabled for @dev while this function is being executed.
907 */
908static int device_suspend_late(struct device *dev, pm_message_t state)
909{
910 pm_callback_t callback = NULL;
911 char *info = NULL;
912
913 if (dev->pm_domain) {
914 info = "late power domain ";
915 callback = pm_late_early_op(&dev->pm_domain->ops, state);
916 } else if (dev->type && dev->type->pm) {
917 info = "late type ";
918 callback = pm_late_early_op(dev->type->pm, state);
919 } else if (dev->class && dev->class->pm) {
920 info = "late class ";
921 callback = pm_late_early_op(dev->class->pm, state);
922 } else if (dev->bus && dev->bus->pm) {
923 info = "late bus ";
924 callback = pm_late_early_op(dev->bus->pm, state);
925 }
926
927 if (!callback && dev->driver && dev->driver->pm) {
928 info = "late driver ";
929 callback = pm_late_early_op(dev->driver->pm, state);
930 }
931
932 return dpm_run_callback(callback, dev, state, info);
933}
934
935/**
936 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
937 * @state: PM transition of the system being carried out.
938 */
939static int dpm_suspend_late(pm_message_t state)
940{
941 ktime_t starttime = ktime_get();
942 int error = 0;
943
944 mutex_lock(&dpm_list_mtx);
945 while (!list_empty(&dpm_suspended_list)) {
946 struct device *dev = to_device(dpm_suspended_list.prev);
947
948 get_device(dev);
949 mutex_unlock(&dpm_list_mtx);
950
951 error = device_suspend_late(dev, state);
952
953 mutex_lock(&dpm_list_mtx);
954 if (error) {
955 pm_dev_err(dev, state, " late", error);
956 suspend_stats.failed_suspend_late++;
957 dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
958 dpm_save_failed_dev(dev_name(dev));
959 put_device(dev);
960 break;
961 }
962 if (!list_empty(&dev->power.entry))
963 list_move(&dev->power.entry, &dpm_late_early_list);
964 put_device(dev);
965 }
966 mutex_unlock(&dpm_list_mtx);
967 if (error)
968 dpm_resume_early(resume_event(state));
969 else
779 dpm_show_time(starttime, state, "late"); 970 dpm_show_time(starttime, state, "late");
971
780 return error; 972 return error;
781} 973}
782EXPORT_SYMBOL_GPL(dpm_suspend_noirq); 974
975/**
976 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
977 * @state: PM transition of the system being carried out.
978 */
979int dpm_suspend_end(pm_message_t state)
980{
981 int error = dpm_suspend_late(state);
982
983 return error ? : dpm_suspend_noirq(state);
984}
985EXPORT_SYMBOL_GPL(dpm_suspend_end);
783 986
784/** 987/**
785 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device. 988 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
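
device_suspend_late() and device_resume_early() above pick the callback from the device's PM domain first, then its type, class and bus, and only fall back to the driver's own dev_pm_ops. A hypothetical PM domain that participates in the new phases while still delegating to the driver through the generic helpers might look like this (illustrative only, not part of the patch):

#include <linux/device.h>
#include <linux/pm.h>

/*
 * Because dev->pm_domain is checked before the driver's own dev_pm_ops,
 * these callbacks are the ones the PM core runs in the late/early phases
 * for every device placed in this (hypothetical) domain.
 */
static int foo_domain_suspend_late(struct device *dev)
{
	int ret;

	ret = pm_generic_suspend_late(dev);	/* let the driver save its state */
	if (ret)
		return ret;

	/* ... domain-wide work, e.g. gating a shared clock, could follow ... */
	return 0;
}

static int foo_domain_resume_early(struct device *dev)
{
	/* ... undo the domain-wide work here, before the driver runs ... */

	return pm_generic_resume_early(dev);
}

static struct dev_pm_domain foo_pm_domain = {
	.ops = {
		.suspend_late	= foo_domain_suspend_late,
		.resume_early	= foo_domain_resume_early,
		/* remaining callbacks omitted for brevity */
	},
};

/* A bus or platform would typically set dev->pm_domain = &foo_pm_domain. */
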
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index ce4fa0831860..9e14ae6cd49c 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -129,9 +129,9 @@ static void do_suspend(void)
129 printk(KERN_DEBUG "suspending xenstore...\n"); 129 printk(KERN_DEBUG "suspending xenstore...\n");
130 xs_suspend(); 130 xs_suspend();
131 131
132 err = dpm_suspend_noirq(PMSG_FREEZE); 132 err = dpm_suspend_end(PMSG_FREEZE);
133 if (err) { 133 if (err) {
134 printk(KERN_ERR "dpm_suspend_noirq failed: %d\n", err); 134 printk(KERN_ERR "dpm_suspend_end failed: %d\n", err);
135 goto out_resume; 135 goto out_resume;
136 } 136 }
137 137
@@ -149,7 +149,7 @@ static void do_suspend(void)
149 149
150 err = stop_machine(xen_suspend, &si, cpumask_of(0)); 150 err = stop_machine(xen_suspend, &si, cpumask_of(0));
151 151
152 dpm_resume_noirq(si.cancelled ? PMSG_THAW : PMSG_RESTORE); 152 dpm_resume_start(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
153 153
154 if (err) { 154 if (err) {
155 printk(KERN_ERR "failed to start xen_suspend: %d\n", err); 155 printk(KERN_ERR "failed to start xen_suspend: %d\n", err);
diff --git a/include/linux/pm.h b/include/linux/pm.h
index e4982ac3fbbc..73c610573a74 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -110,6 +110,10 @@ typedef struct pm_message {
110 * Subsystem-level @suspend() is executed for all devices after invoking 110 * Subsystem-level @suspend() is executed for all devices after invoking
111 * subsystem-level @prepare() for all of them. 111 * subsystem-level @prepare() for all of them.
112 * 112 *
113 * @suspend_late: Continue operations started by @suspend(). For a number of
114 * devices @suspend_late() may point to the same callback routine as the
115 * runtime suspend callback.
116 *
113 * @resume: Executed after waking the system up from a sleep state in which the 117 * @resume: Executed after waking the system up from a sleep state in which the
114 * contents of main memory were preserved. The exact action to perform 118 * contents of main memory were preserved. The exact action to perform
115 * depends on the device's subsystem, but generally the driver is expected 119 * depends on the device's subsystem, but generally the driver is expected
@@ -122,6 +126,10 @@ typedef struct pm_message {
122 * Subsystem-level @resume() is executed for all devices after invoking 126 * Subsystem-level @resume() is executed for all devices after invoking
123 * subsystem-level @resume_noirq() for all of them. 127 * subsystem-level @resume_noirq() for all of them.
124 * 128 *
129 * @resume_early: Prepare to execute @resume(). For a number of devices
130 * @resume_early() may point to the same callback routine as the runtime
131 * resume callback.
132 *
125 * @freeze: Hibernation-specific, executed before creating a hibernation image. 133 * @freeze: Hibernation-specific, executed before creating a hibernation image.
126 * Analogous to @suspend(), but it should not enable the device to signal 134 * Analogous to @suspend(), but it should not enable the device to signal
127 * wakeup events or change its power state. The majority of subsystems 135 * wakeup events or change its power state. The majority of subsystems
@@ -131,6 +139,10 @@ typedef struct pm_message {
131 * Subsystem-level @freeze() is executed for all devices after invoking 139 * Subsystem-level @freeze() is executed for all devices after invoking
132 * subsystem-level @prepare() for all of them. 140 * subsystem-level @prepare() for all of them.
133 * 141 *
142 * @freeze_late: Continue operations started by @freeze(). Analogous to
143 * @suspend_late(), but it should not enable the device to signal wakeup
144 * events or change its power state.
145 *
134 * @thaw: Hibernation-specific, executed after creating a hibernation image OR 146 * @thaw: Hibernation-specific, executed after creating a hibernation image OR
135 * if the creation of an image has failed. Also executed after a failing 147 * if the creation of an image has failed. Also executed after a failing
136 * attempt to restore the contents of main memory from such an image. 148 * attempt to restore the contents of main memory from such an image.
@@ -140,15 +152,23 @@ typedef struct pm_message {
140 * subsystem-level @thaw_noirq() for all of them. It also may be executed 152 * subsystem-level @thaw_noirq() for all of them. It also may be executed
141 * directly after @freeze() in case of a transition error. 153 * directly after @freeze() in case of a transition error.
142 * 154 *
155 * @thaw_early: Prepare to execute @thaw(). Undo the changes made by the
156 * preceding @freeze_late().
157 *
143 * @poweroff: Hibernation-specific, executed after saving a hibernation image. 158 * @poweroff: Hibernation-specific, executed after saving a hibernation image.
144 * Analogous to @suspend(), but it need not save the device's settings in 159 * Analogous to @suspend(), but it need not save the device's settings in
145 * memory. 160 * memory.
146 * Subsystem-level @poweroff() is executed for all devices after invoking 161 * Subsystem-level @poweroff() is executed for all devices after invoking
147 * subsystem-level @prepare() for all of them. 162 * subsystem-level @prepare() for all of them.
148 * 163 *
164 * @poweroff_late: Continue operations started by @poweroff(). Analogous to
165 * @suspend_late(), but it need not save the device's settings in memory.
166 *
149 * @restore: Hibernation-specific, executed after restoring the contents of main 167 * @restore: Hibernation-specific, executed after restoring the contents of main
150 * memory from a hibernation image, analogous to @resume(). 168 * memory from a hibernation image, analogous to @resume().
151 * 169 *
170 * @restore_early: Prepare to execute @restore(), analogous to @resume_early().
171 *
152 * @suspend_noirq: Complete the actions started by @suspend(). Carry out any 172 * @suspend_noirq: Complete the actions started by @suspend(). Carry out any
153 * additional operations required for suspending the device that might be 173 * additional operations required for suspending the device that might be
154 * racing with its driver's interrupt handler, which is guaranteed not to 174 * racing with its driver's interrupt handler, which is guaranteed not to
@@ -158,9 +178,10 @@ typedef struct pm_message {
158 * @suspend_noirq() has returned successfully. If the device can generate 178 * @suspend_noirq() has returned successfully. If the device can generate
159 * system wakeup signals and is enabled to wake up the system, it should be 179 * system wakeup signals and is enabled to wake up the system, it should be
160 * configured to do so at that time. However, depending on the platform 180 * configured to do so at that time. However, depending on the platform
161 * and device's subsystem, @suspend() may be allowed to put the device into 181 * and device's subsystem, @suspend() or @suspend_late() may be allowed to
162 * the low-power state and configure it to generate wakeup signals, in 182 * put the device into the low-power state and configure it to generate
163 * which case it generally is not necessary to define @suspend_noirq(). 183 * wakeup signals, in which case it generally is not necessary to define
184 * @suspend_noirq().
164 * 185 *
165 * @resume_noirq: Prepare for the execution of @resume() by carrying out any 186 * @resume_noirq: Prepare for the execution of @resume() by carrying out any
166 * operations required for resuming the device that might be racing with 187 * operations required for resuming the device that might be racing with
@@ -171,9 +192,9 @@ typedef struct pm_message {
171 * additional operations required for freezing the device that might be 192 * additional operations required for freezing the device that might be
172 * racing with its driver's interrupt handler, which is guaranteed not to 193 * racing with its driver's interrupt handler, which is guaranteed not to
173 * run while @freeze_noirq() is being executed. 194 * run while @freeze_noirq() is being executed.
174 * The power state of the device should not be changed by either @freeze() 195 * The power state of the device should not be changed by either @freeze(),
175 * or @freeze_noirq() and it should not be configured to signal system 196 * or @freeze_late(), or @freeze_noirq() and it should not be configured to
176 * wakeup by any of these callbacks. 197 * signal system wakeup by any of these callbacks.
177 * 198 *
178 * @thaw_noirq: Prepare for the execution of @thaw() by carrying out any 199 * @thaw_noirq: Prepare for the execution of @thaw() by carrying out any
179 * operations required for thawing the device that might be racing with its 200 * operations required for thawing the device that might be racing with its
@@ -249,6 +270,12 @@ struct dev_pm_ops {
249 int (*thaw)(struct device *dev); 270 int (*thaw)(struct device *dev);
250 int (*poweroff)(struct device *dev); 271 int (*poweroff)(struct device *dev);
251 int (*restore)(struct device *dev); 272 int (*restore)(struct device *dev);
273 int (*suspend_late)(struct device *dev);
274 int (*resume_early)(struct device *dev);
275 int (*freeze_late)(struct device *dev);
276 int (*thaw_early)(struct device *dev);
277 int (*poweroff_late)(struct device *dev);
278 int (*restore_early)(struct device *dev);
252 int (*suspend_noirq)(struct device *dev); 279 int (*suspend_noirq)(struct device *dev);
253 int (*resume_noirq)(struct device *dev); 280 int (*resume_noirq)(struct device *dev);
254 int (*freeze_noirq)(struct device *dev); 281 int (*freeze_noirq)(struct device *dev);
@@ -584,13 +611,13 @@ struct dev_pm_domain {
584 611
585#ifdef CONFIG_PM_SLEEP 612#ifdef CONFIG_PM_SLEEP
586extern void device_pm_lock(void); 613extern void device_pm_lock(void);
587extern void dpm_resume_noirq(pm_message_t state); 614extern void dpm_resume_start(pm_message_t state);
588extern void dpm_resume_end(pm_message_t state); 615extern void dpm_resume_end(pm_message_t state);
589extern void dpm_resume(pm_message_t state); 616extern void dpm_resume(pm_message_t state);
590extern void dpm_complete(pm_message_t state); 617extern void dpm_complete(pm_message_t state);
591 618
592extern void device_pm_unlock(void); 619extern void device_pm_unlock(void);
593extern int dpm_suspend_noirq(pm_message_t state); 620extern int dpm_suspend_end(pm_message_t state);
594extern int dpm_suspend_start(pm_message_t state); 621extern int dpm_suspend_start(pm_message_t state);
595extern int dpm_suspend(pm_message_t state); 622extern int dpm_suspend(pm_message_t state);
596extern int dpm_prepare(pm_message_t state); 623extern int dpm_prepare(pm_message_t state);
@@ -605,17 +632,23 @@ extern void __suspend_report_result(const char *function, void *fn, int ret);
605extern int device_pm_wait_for_dev(struct device *sub, struct device *dev); 632extern int device_pm_wait_for_dev(struct device *sub, struct device *dev);
606 633
607extern int pm_generic_prepare(struct device *dev); 634extern int pm_generic_prepare(struct device *dev);
635extern int pm_generic_suspend_late(struct device *dev);
608extern int pm_generic_suspend_noirq(struct device *dev); 636extern int pm_generic_suspend_noirq(struct device *dev);
609extern int pm_generic_suspend(struct device *dev); 637extern int pm_generic_suspend(struct device *dev);
638extern int pm_generic_resume_early(struct device *dev);
610extern int pm_generic_resume_noirq(struct device *dev); 639extern int pm_generic_resume_noirq(struct device *dev);
611extern int pm_generic_resume(struct device *dev); 640extern int pm_generic_resume(struct device *dev);
612extern int pm_generic_freeze_noirq(struct device *dev); 641extern int pm_generic_freeze_noirq(struct device *dev);
642extern int pm_generic_freeze_late(struct device *dev);
613extern int pm_generic_freeze(struct device *dev); 643extern int pm_generic_freeze(struct device *dev);
614extern int pm_generic_thaw_noirq(struct device *dev); 644extern int pm_generic_thaw_noirq(struct device *dev);
645extern int pm_generic_thaw_early(struct device *dev);
615extern int pm_generic_thaw(struct device *dev); 646extern int pm_generic_thaw(struct device *dev);
616extern int pm_generic_restore_noirq(struct device *dev); 647extern int pm_generic_restore_noirq(struct device *dev);
648extern int pm_generic_restore_early(struct device *dev);
617extern int pm_generic_restore(struct device *dev); 649extern int pm_generic_restore(struct device *dev);
618extern int pm_generic_poweroff_noirq(struct device *dev); 650extern int pm_generic_poweroff_noirq(struct device *dev);
651extern int pm_generic_poweroff_late(struct device *dev);
619extern int pm_generic_poweroff(struct device *dev); 652extern int pm_generic_poweroff(struct device *dev);
620extern void pm_generic_complete(struct device *dev); 653extern void pm_generic_complete(struct device *dev);
621 654
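
As the updated kerneldoc notes, suspend_late() and resume_early() run with runtime PM disabled and may simply point at the driver's runtime PM callbacks. A sketch of that pattern, with hypothetical foo_* names:

#include <linux/device.h>
#include <linux/pm.h>

static int foo_runtime_suspend(struct device *dev)
{
	/* put the device into its low-power state */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* bring the device back to full power */
	return 0;
}

static const struct dev_pm_ops foo_drv_pm_ops = {
	.runtime_suspend = foo_runtime_suspend,
	.runtime_resume	 = foo_runtime_resume,
	/*
	 * Runtime PM is disabled while the late/early phases run, so the
	 * same routines can serve as the system sleep callbacks too.
	 */
	.suspend_late	 = foo_runtime_suspend,
	.resume_early	 = foo_runtime_resume,
};
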
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 91784a4f8608..ac1c114c499d 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -42,8 +42,10 @@ enum suspend_stat_step {
42 SUSPEND_FREEZE = 1, 42 SUSPEND_FREEZE = 1,
43 SUSPEND_PREPARE, 43 SUSPEND_PREPARE,
44 SUSPEND_SUSPEND, 44 SUSPEND_SUSPEND,
45 SUSPEND_SUSPEND_LATE,
45 SUSPEND_SUSPEND_NOIRQ, 46 SUSPEND_SUSPEND_NOIRQ,
46 SUSPEND_RESUME_NOIRQ, 47 SUSPEND_RESUME_NOIRQ,
48 SUSPEND_RESUME_EARLY,
47 SUSPEND_RESUME 49 SUSPEND_RESUME
48}; 50};
49 51
@@ -53,8 +55,10 @@ struct suspend_stats {
53 int failed_freeze; 55 int failed_freeze;
54 int failed_prepare; 56 int failed_prepare;
55 int failed_suspend; 57 int failed_suspend;
58 int failed_suspend_late;
56 int failed_suspend_noirq; 59 int failed_suspend_noirq;
57 int failed_resume; 60 int failed_resume;
61 int failed_resume_early;
58 int failed_resume_noirq; 62 int failed_resume_noirq;
59#define REC_FAILED_NUM 2 63#define REC_FAILED_NUM 2
60 int last_failed_dev; 64 int last_failed_dev;
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 7b0886786701..a6a675cb9818 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1546,13 +1546,13 @@ int kernel_kexec(void)
1546 if (error) 1546 if (error)
1547 goto Resume_console; 1547 goto Resume_console;
1548 /* At this point, dpm_suspend_start() has been called, 1548 /* At this point, dpm_suspend_start() has been called,
1549 * but *not* dpm_suspend_noirq(). We *must* call 1549 * but *not* dpm_suspend_end(). We *must* call
1550 * dpm_suspend_noirq() now. Otherwise, drivers for 1550 * dpm_suspend_end() now. Otherwise, drivers for
1551 * some devices (e.g. interrupt controllers) become 1551 * some devices (e.g. interrupt controllers) become
1552 * desynchronized with the actual state of the 1552 * desynchronized with the actual state of the
1553 * hardware at resume time, and evil weirdness ensues. 1553 * hardware at resume time, and evil weirdness ensues.
1554 */ 1554 */
1555 error = dpm_suspend_noirq(PMSG_FREEZE); 1555 error = dpm_suspend_end(PMSG_FREEZE);
1556 if (error) 1556 if (error)
1557 goto Resume_devices; 1557 goto Resume_devices;
1558 error = disable_nonboot_cpus(); 1558 error = disable_nonboot_cpus();
@@ -1579,7 +1579,7 @@ int kernel_kexec(void)
1579 local_irq_enable(); 1579 local_irq_enable();
1580 Enable_cpus: 1580 Enable_cpus:
1581 enable_nonboot_cpus(); 1581 enable_nonboot_cpus();
1582 dpm_resume_noirq(PMSG_RESTORE); 1582 dpm_resume_start(PMSG_RESTORE);
1583 Resume_devices: 1583 Resume_devices:
1584 dpm_resume_end(PMSG_RESTORE); 1584 dpm_resume_end(PMSG_RESTORE);
1585 Resume_console: 1585 Resume_console:
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 6d6d28870335..a5d4cf0aa03e 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -245,8 +245,8 @@ void swsusp_show_speed(struct timeval *start, struct timeval *stop,
245 * create_image - Create a hibernation image. 245 * create_image - Create a hibernation image.
246 * @platform_mode: Whether or not to use the platform driver. 246 * @platform_mode: Whether or not to use the platform driver.
247 * 247 *
248 * Execute device drivers' .freeze_noirq() callbacks, create a hibernation image 248 * Execute device drivers' "late" and "noirq" freeze callbacks, create a
249 * and execute the drivers' .thaw_noirq() callbacks. 249 * hibernation image and run the drivers' "noirq" and "early" thaw callbacks.
250 * 250 *
251 * Control reappears in this routine after the subsequent restore. 251 * Control reappears in this routine after the subsequent restore.
252 */ 252 */
@@ -254,7 +254,7 @@ static int create_image(int platform_mode)
254{ 254{
255 int error; 255 int error;
256 256
257 error = dpm_suspend_noirq(PMSG_FREEZE); 257 error = dpm_suspend_end(PMSG_FREEZE);
258 if (error) { 258 if (error) {
259 printk(KERN_ERR "PM: Some devices failed to power down, " 259 printk(KERN_ERR "PM: Some devices failed to power down, "
260 "aborting hibernation\n"); 260 "aborting hibernation\n");
@@ -306,7 +306,7 @@ static int create_image(int platform_mode)
306 Platform_finish: 306 Platform_finish:
307 platform_finish(platform_mode); 307 platform_finish(platform_mode);
308 308
309 dpm_resume_noirq(in_suspend ? 309 dpm_resume_start(in_suspend ?
310 (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); 310 (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
311 311
312 return error; 312 return error;
@@ -394,16 +394,16 @@ int hibernation_snapshot(int platform_mode)
394 * resume_target_kernel - Restore system state from a hibernation image. 394 * resume_target_kernel - Restore system state from a hibernation image.
395 * @platform_mode: Whether or not to use the platform driver. 395 * @platform_mode: Whether or not to use the platform driver.
396 * 396 *
397 * Execute device drivers' .freeze_noirq() callbacks, restore the contents of 397 * Execute device drivers' "noirq" and "late" freeze callbacks, restore the
398 * highmem that have not been restored yet from the image and run the low-level 398 * contents of highmem that have not been restored yet from the image and run
399 * code that will restore the remaining contents of memory and switch to the 399 * the low-level code that will restore the remaining contents of memory and
400 * just restored target kernel. 400 * switch to the just restored target kernel.
401 */ 401 */
402static int resume_target_kernel(bool platform_mode) 402static int resume_target_kernel(bool platform_mode)
403{ 403{
404 int error; 404 int error;
405 405
406 error = dpm_suspend_noirq(PMSG_QUIESCE); 406 error = dpm_suspend_end(PMSG_QUIESCE);
407 if (error) { 407 if (error) {
408 printk(KERN_ERR "PM: Some devices failed to power down, " 408 printk(KERN_ERR "PM: Some devices failed to power down, "
409 "aborting resume\n"); 409 "aborting resume\n");
@@ -460,7 +460,7 @@ static int resume_target_kernel(bool platform_mode)
460 Cleanup: 460 Cleanup:
461 platform_restore_cleanup(platform_mode); 461 platform_restore_cleanup(platform_mode);
462 462
463 dpm_resume_noirq(PMSG_RECOVER); 463 dpm_resume_start(PMSG_RECOVER);
464 464
465 return error; 465 return error;
466} 466}
@@ -518,7 +518,7 @@ int hibernation_platform_enter(void)
518 goto Resume_devices; 518 goto Resume_devices;
519 } 519 }
520 520
521 error = dpm_suspend_noirq(PMSG_HIBERNATE); 521 error = dpm_suspend_end(PMSG_HIBERNATE);
522 if (error) 522 if (error)
523 goto Resume_devices; 523 goto Resume_devices;
524 524
@@ -549,7 +549,7 @@ int hibernation_platform_enter(void)
549 Platform_finish: 549 Platform_finish:
550 hibernation_ops->finish(); 550 hibernation_ops->finish();
551 551
552 dpm_resume_noirq(PMSG_RESTORE); 552 dpm_resume_start(PMSG_RESTORE);
553 553
554 Resume_devices: 554 Resume_devices:
555 entering_platform_hibernation = false; 555 entering_platform_hibernation = false;
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 9824b41e5a18..8c5014a4e052 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -165,16 +165,20 @@ static int suspend_stats_show(struct seq_file *s, void *unused)
165 last_errno %= REC_FAILED_NUM; 165 last_errno %= REC_FAILED_NUM;
166 last_step = suspend_stats.last_failed_step + REC_FAILED_NUM - 1; 166 last_step = suspend_stats.last_failed_step + REC_FAILED_NUM - 1;
167 last_step %= REC_FAILED_NUM; 167 last_step %= REC_FAILED_NUM;
168 seq_printf(s, "%s: %d\n%s: %d\n%s: %d\n%s: %d\n" 168 seq_printf(s, "%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n"
169 "%s: %d\n%s: %d\n%s: %d\n%s: %d\n", 169 "%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n",
170 "success", suspend_stats.success, 170 "success", suspend_stats.success,
171 "fail", suspend_stats.fail, 171 "fail", suspend_stats.fail,
172 "failed_freeze", suspend_stats.failed_freeze, 172 "failed_freeze", suspend_stats.failed_freeze,
173 "failed_prepare", suspend_stats.failed_prepare, 173 "failed_prepare", suspend_stats.failed_prepare,
174 "failed_suspend", suspend_stats.failed_suspend, 174 "failed_suspend", suspend_stats.failed_suspend,
175 "failed_suspend_late",
176 suspend_stats.failed_suspend_late,
175 "failed_suspend_noirq", 177 "failed_suspend_noirq",
176 suspend_stats.failed_suspend_noirq, 178 suspend_stats.failed_suspend_noirq,
177 "failed_resume", suspend_stats.failed_resume, 179 "failed_resume", suspend_stats.failed_resume,
180 "failed_resume_early",
181 suspend_stats.failed_resume_early,
178 "failed_resume_noirq", 182 "failed_resume_noirq",
179 suspend_stats.failed_resume_noirq); 183 suspend_stats.failed_resume_noirq);
180 seq_printf(s, "failures:\n last_failed_dev:\t%-s\n", 184 seq_printf(s, "failures:\n last_failed_dev:\t%-s\n",
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 4fd51beed879..560a639614a1 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -147,7 +147,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
147 goto Platform_finish; 147 goto Platform_finish;
148 } 148 }
149 149
150 error = dpm_suspend_noirq(PMSG_SUSPEND); 150 error = dpm_suspend_end(PMSG_SUSPEND);
151 if (error) { 151 if (error) {
152 printk(KERN_ERR "PM: Some devices failed to power down\n"); 152 printk(KERN_ERR "PM: Some devices failed to power down\n");
153 goto Platform_finish; 153 goto Platform_finish;
@@ -189,7 +189,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
189 if (suspend_ops->wake) 189 if (suspend_ops->wake)
190 suspend_ops->wake(); 190 suspend_ops->wake();
191 191
192 dpm_resume_noirq(PMSG_RESUME); 192 dpm_resume_start(PMSG_RESUME);
193 193
194 Platform_finish: 194 Platform_finish:
195 if (suspend_ops->finish) 195 if (suspend_ops->finish)