Diffstat (limited to 'drivers/base')
-rw-r--r--	drivers/base/power/main.c	675
-rw-r--r--	drivers/base/power/power.h	2
-rw-r--r--	drivers/base/power/trace.c	4
3 files changed, 527 insertions, 154 deletions
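
Note (not part of the commit): the patch replaces the old dpm_active/dpm_off/dpm_off_irq lists with a single dpm_list plus a per-device dev->power.status field. The status values themselves are defined in include/linux/pm.h, which is outside this diffstat; the sketch below is only an illustration of an ordering that satisfies the comparisons made in main.c (parent status >= DPM_SUSPENDING in device_pm_add(), status >= DPM_OFF in dpm_resume(), status > DPM_ON in dpm_complete(), status > DPM_OFF in dpm_power_up()).

	/* Illustrative only -- the real definition lives in include/linux/pm.h. */
	enum dpm_state {
		DPM_ON,		/* fully functional, not in a PM transition */
		DPM_PREPARING,	/* ->prepare() callbacks are being run */
		DPM_RESUMING,	/* being resumed; new children may be registered again */
		DPM_SUSPENDING,	/* ->prepare() done, ->suspend() not yet run */
		DPM_OFF,	/* ->suspend() done */
		DPM_OFF_IRQ,	/* ->suspend_noirq() done as well */
	};
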
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 45cc3d9eacb8..d571204aaff7 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -12,11 +12,9 @@
  * and add it to the list of power-controlled devices. sysfs entries for
  * controlling device power management will also be added.
  *
- * A different set of lists than the global subsystem list are used to
- * keep track of power info because we use different lists to hold
- * devices based on what stage of the power management process they
- * are in. The power domain dependencies may also differ from the
- * ancestral dependencies that the subsystem list maintains.
+ * A separate list is used for keeping track of power info, because the power
+ * domain dependencies may differ from the ancestral dependencies that the
+ * subsystem list maintains.
  */
 
 #include <linux/device.h>
@@ -30,31 +28,40 @@
 #include "power.h"
 
 /*
- * The entries in the dpm_active list are in a depth first order, simply
+ * The entries in the dpm_list list are in a depth first order, simply
  * because children are guaranteed to be discovered after parents, and
  * are inserted at the back of the list on discovery.
  *
- * All the other lists are kept in the same order, for consistency.
- * However the lists aren't always traversed in the same order.
- * Semaphores must be acquired from the top (i.e., front) down
- * and released in the opposite order. Devices must be suspended
- * from the bottom (i.e., end) up and resumed in the opposite order.
- * That way no parent will be suspended while it still has an active
- * child.
- *
  * Since device_pm_add() may be called with a device semaphore held,
  * we must never try to acquire a device semaphore while holding
  * dpm_list_mutex.
  */
 
-LIST_HEAD(dpm_active);
-static LIST_HEAD(dpm_off);
-static LIST_HEAD(dpm_off_irq);
+LIST_HEAD(dpm_list);
 
 static DEFINE_MUTEX(dpm_list_mtx);
 
-/* 'true' if all devices have been suspended, protected by dpm_list_mtx */
-static bool all_sleeping;
+/*
+ * Set once the preparation of devices for a PM transition has started, reset
+ * before starting to resume devices. Protected by dpm_list_mtx.
+ */
+static bool transition_started;
+
+/**
+ * device_pm_lock - lock the list of active devices used by the PM core
+ */
+void device_pm_lock(void)
+{
+	mutex_lock(&dpm_list_mtx);
+}
+
+/**
+ * device_pm_unlock - unlock the list of active devices used by the PM core
+ */
+void device_pm_unlock(void)
+{
+	mutex_unlock(&dpm_list_mtx);
+}
 
 /**
  * device_pm_add - add a device to the list of active devices
@@ -68,17 +75,25 @@ int device_pm_add(struct device *dev)
 		 dev->bus ? dev->bus->name : "No Bus",
 		 kobject_name(&dev->kobj));
 	mutex_lock(&dpm_list_mtx);
-	if ((dev->parent && dev->parent->power.sleeping) || all_sleeping) {
-		if (dev->parent->power.sleeping)
-			dev_warn(dev, "parent %s is sleeping\n",
+	if (dev->parent) {
+		if (dev->parent->power.status >= DPM_SUSPENDING) {
+			dev_warn(dev, "parent %s is sleeping, will not add\n",
 				dev->parent->bus_id);
-		else
-			dev_warn(dev, "all devices are sleeping\n");
+			WARN_ON(true);
+		}
+	} else if (transition_started) {
+		/*
+		 * We refuse to register parentless devices while a PM
+		 * transition is in progress in order to avoid leaving them
+		 * unhandled down the road
+		 */
 		WARN_ON(true);
 	}
 	error = dpm_sysfs_add(dev);
-	if (!error)
-		list_add_tail(&dev->power.entry, &dpm_active);
+	if (!error) {
+		dev->power.status = DPM_ON;
+		list_add_tail(&dev->power.entry, &dpm_list);
+	}
 	mutex_unlock(&dpm_list_mtx);
 	return error;
 }
@@ -100,73 +115,243 @@ void device_pm_remove(struct device *dev)
 	mutex_unlock(&dpm_list_mtx);
 }
 
+/**
+ * pm_op - execute the PM operation appropriate for given PM event
+ * @dev: Device.
+ * @ops: PM operations to choose from.
+ * @state: PM transition of the system being carried out.
+ */
+static int pm_op(struct device *dev, struct pm_ops *ops, pm_message_t state)
+{
+	int error = 0;
+
+	switch (state.event) {
+#ifdef CONFIG_SUSPEND
+	case PM_EVENT_SUSPEND:
+		if (ops->suspend) {
+			error = ops->suspend(dev);
+			suspend_report_result(ops->suspend, error);
+		}
+		break;
+	case PM_EVENT_RESUME:
+		if (ops->resume) {
+			error = ops->resume(dev);
+			suspend_report_result(ops->resume, error);
+		}
+		break;
+#endif /* CONFIG_SUSPEND */
+#ifdef CONFIG_HIBERNATION
+	case PM_EVENT_FREEZE:
+	case PM_EVENT_QUIESCE:
+		if (ops->freeze) {
+			error = ops->freeze(dev);
+			suspend_report_result(ops->freeze, error);
+		}
+		break;
+	case PM_EVENT_HIBERNATE:
+		if (ops->poweroff) {
+			error = ops->poweroff(dev);
+			suspend_report_result(ops->poweroff, error);
+		}
+		break;
+	case PM_EVENT_THAW:
+	case PM_EVENT_RECOVER:
+		if (ops->thaw) {
+			error = ops->thaw(dev);
+			suspend_report_result(ops->thaw, error);
+		}
+		break;
+	case PM_EVENT_RESTORE:
+		if (ops->restore) {
+			error = ops->restore(dev);
+			suspend_report_result(ops->restore, error);
+		}
+		break;
+#endif /* CONFIG_HIBERNATION */
+	default:
+		error = -EINVAL;
+	}
+	return error;
+}
+
+/**
+ * pm_noirq_op - execute the PM operation appropriate for given PM event
+ * @dev: Device.
+ * @ops: PM operations to choose from.
+ * @state: PM transition of the system being carried out.
+ *
+ * The operation is executed with interrupts disabled by the only remaining
+ * functional CPU in the system.
+ */
+static int pm_noirq_op(struct device *dev, struct pm_ext_ops *ops,
+			pm_message_t state)
+{
+	int error = 0;
+
+	switch (state.event) {
+#ifdef CONFIG_SUSPEND
+	case PM_EVENT_SUSPEND:
+		if (ops->suspend_noirq) {
+			error = ops->suspend_noirq(dev);
+			suspend_report_result(ops->suspend_noirq, error);
+		}
+		break;
+	case PM_EVENT_RESUME:
+		if (ops->resume_noirq) {
+			error = ops->resume_noirq(dev);
+			suspend_report_result(ops->resume_noirq, error);
+		}
+		break;
+#endif /* CONFIG_SUSPEND */
+#ifdef CONFIG_HIBERNATION
+	case PM_EVENT_FREEZE:
+	case PM_EVENT_QUIESCE:
+		if (ops->freeze_noirq) {
+			error = ops->freeze_noirq(dev);
+			suspend_report_result(ops->freeze_noirq, error);
+		}
+		break;
+	case PM_EVENT_HIBERNATE:
+		if (ops->poweroff_noirq) {
+			error = ops->poweroff_noirq(dev);
+			suspend_report_result(ops->poweroff_noirq, error);
+		}
+		break;
+	case PM_EVENT_THAW:
+	case PM_EVENT_RECOVER:
+		if (ops->thaw_noirq) {
+			error = ops->thaw_noirq(dev);
+			suspend_report_result(ops->thaw_noirq, error);
+		}
+		break;
+	case PM_EVENT_RESTORE:
+		if (ops->restore_noirq) {
+			error = ops->restore_noirq(dev);
+			suspend_report_result(ops->restore_noirq, error);
+		}
+		break;
+#endif /* CONFIG_HIBERNATION */
+	default:
+		error = -EINVAL;
+	}
+	return error;
+}
+
+static char *pm_verb(int event)
+{
+	switch (event) {
+	case PM_EVENT_SUSPEND:
+		return "suspend";
+	case PM_EVENT_RESUME:
+		return "resume";
+	case PM_EVENT_FREEZE:
+		return "freeze";
+	case PM_EVENT_QUIESCE:
+		return "quiesce";
+	case PM_EVENT_HIBERNATE:
+		return "hibernate";
+	case PM_EVENT_THAW:
+		return "thaw";
+	case PM_EVENT_RESTORE:
+		return "restore";
+	case PM_EVENT_RECOVER:
+		return "recover";
+	default:
+		return "(unknown PM event)";
+	}
+}
+
+static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
+{
+	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
+		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
+		", may wakeup" : "");
+}
+
+static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
+			int error)
+{
+	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
+		kobject_name(&dev->kobj), pm_verb(state.event), info, error);
+}
+
 /*------------------------- Resume routines -------------------------*/
 
 /**
- * resume_device_early - Power on one device (early resume).
+ * resume_device_noirq - Power on one device (early resume).
  * @dev: Device.
+ * @state: PM transition of the system being carried out.
  *
  * Must be called with interrupts disabled.
  */
-static int resume_device_early(struct device *dev)
+static int resume_device_noirq(struct device *dev, pm_message_t state)
 {
 	int error = 0;
 
 	TRACE_DEVICE(dev);
 	TRACE_RESUME(0);
 
-	if (dev->bus && dev->bus->resume_early) {
-		dev_dbg(dev, "EARLY resume\n");
+	if (!dev->bus)
+		goto End;
+
+	if (dev->bus->pm) {
+		pm_dev_dbg(dev, state, "EARLY ");
+		error = pm_noirq_op(dev, dev->bus->pm, state);
+	} else if (dev->bus->resume_early) {
+		pm_dev_dbg(dev, state, "legacy EARLY ");
 		error = dev->bus->resume_early(dev);
 	}
-
+ End:
 	TRACE_RESUME(error);
 	return error;
 }
 
 /**
  * dpm_power_up - Power on all regular (non-sysdev) devices.
+ * @state: PM transition of the system being carried out.
  *
- * Walk the dpm_off_irq list and power each device up. This
- * is used for devices that required they be powered down with
- * interrupts disabled. As devices are powered on, they are moved
- * to the dpm_off list.
+ * Execute the appropriate "noirq resume" callback for all devices marked
+ * as DPM_OFF_IRQ.
  *
  * Must be called with interrupts disabled and only one CPU running.
  */
-static void dpm_power_up(void)
+static void dpm_power_up(pm_message_t state)
 {
+	struct device *dev;
 
-	while (!list_empty(&dpm_off_irq)) {
-		struct list_head *entry = dpm_off_irq.next;
-		struct device *dev = to_device(entry);
+	list_for_each_entry(dev, &dpm_list, power.entry)
+		if (dev->power.status > DPM_OFF) {
+			int error;
 
-		list_move_tail(entry, &dpm_off);
-		resume_device_early(dev);
-	}
+			dev->power.status = DPM_OFF;
+			error = resume_device_noirq(dev, state);
+			if (error)
+				pm_dev_err(dev, state, " early", error);
+		}
 }
 
 /**
  * device_power_up - Turn on all devices that need special attention.
+ * @state: PM transition of the system being carried out.
  *
  * Power on system devices, then devices that required we shut them down
  * with interrupts disabled.
  *
  * Must be called with interrupts disabled.
  */
-void device_power_up(void)
+void device_power_up(pm_message_t state)
 {
 	sysdev_resume();
-	dpm_power_up();
+	dpm_power_up(state);
 }
 EXPORT_SYMBOL_GPL(device_power_up);
 
 /**
  * resume_device - Restore state for one device.
  * @dev: Device.
- *
+ * @state: PM transition of the system being carried out.
  */
-static int resume_device(struct device *dev)
+static int resume_device(struct device *dev, pm_message_t state)
 {
 	int error = 0;
 
@@ -175,21 +360,40 @@ static int resume_device(struct device *dev)
 
 	down(&dev->sem);
 
-	if (dev->bus && dev->bus->resume) {
-		dev_dbg(dev,"resuming\n");
-		error = dev->bus->resume(dev);
+	if (dev->bus) {
+		if (dev->bus->pm) {
+			pm_dev_dbg(dev, state, "");
+			error = pm_op(dev, &dev->bus->pm->base, state);
+		} else if (dev->bus->resume) {
+			pm_dev_dbg(dev, state, "legacy ");
+			error = dev->bus->resume(dev);
+		}
+		if (error)
+			goto End;
 	}
 
-	if (!error && dev->type && dev->type->resume) {
-		dev_dbg(dev,"resuming\n");
-		error = dev->type->resume(dev);
+	if (dev->type) {
+		if (dev->type->pm) {
+			pm_dev_dbg(dev, state, "type ");
+			error = pm_op(dev, dev->type->pm, state);
+		} else if (dev->type->resume) {
+			pm_dev_dbg(dev, state, "legacy type ");
+			error = dev->type->resume(dev);
+		}
+		if (error)
+			goto End;
 	}
 
-	if (!error && dev->class && dev->class->resume) {
-		dev_dbg(dev,"class resume\n");
-		error = dev->class->resume(dev);
+	if (dev->class) {
+		if (dev->class->pm) {
+			pm_dev_dbg(dev, state, "class ");
+			error = pm_op(dev, dev->class->pm, state);
+		} else if (dev->class->resume) {
+			pm_dev_dbg(dev, state, "legacy class ");
+			error = dev->class->resume(dev);
+		}
 	}
-
+ End:
 	up(&dev->sem);
 
 	TRACE_RESUME(error);
@@ -198,78 +402,161 @@ static int resume_device(struct device *dev)
 
 /**
  * dpm_resume - Resume every device.
+ * @state: PM transition of the system being carried out.
  *
- * Resume the devices that have either not gone through
- * the late suspend, or that did go through it but also
- * went through the early resume.
+ * Execute the appropriate "resume" callback for all devices the status of
+ * which indicates that they are inactive.
+ */
+static void dpm_resume(pm_message_t state)
+{
+	struct list_head list;
+
+	INIT_LIST_HEAD(&list);
+	mutex_lock(&dpm_list_mtx);
+	transition_started = false;
+	while (!list_empty(&dpm_list)) {
+		struct device *dev = to_device(dpm_list.next);
+
+		get_device(dev);
+		if (dev->power.status >= DPM_OFF) {
+			int error;
+
+			dev->power.status = DPM_RESUMING;
+			mutex_unlock(&dpm_list_mtx);
+
+			error = resume_device(dev, state);
+
+			mutex_lock(&dpm_list_mtx);
+			if (error)
+				pm_dev_err(dev, state, "", error);
+		} else if (dev->power.status == DPM_SUSPENDING) {
+			/* Allow new children of the device to be registered */
+			dev->power.status = DPM_RESUMING;
+		}
+		if (!list_empty(&dev->power.entry))
+			list_move_tail(&dev->power.entry, &list);
+		put_device(dev);
+	}
+	list_splice(&list, &dpm_list);
+	mutex_unlock(&dpm_list_mtx);
+}
+
+/**
+ * complete_device - Complete a PM transition for given device
+ * @dev: Device.
+ * @state: PM transition of the system being carried out.
+ */
+static void complete_device(struct device *dev, pm_message_t state)
+{
+	down(&dev->sem);
+
+	if (dev->class && dev->class->pm && dev->class->pm->complete) {
+		pm_dev_dbg(dev, state, "completing class ");
+		dev->class->pm->complete(dev);
+	}
+
+	if (dev->type && dev->type->pm && dev->type->pm->complete) {
+		pm_dev_dbg(dev, state, "completing type ");
+		dev->type->pm->complete(dev);
+	}
+
+	if (dev->bus && dev->bus->pm && dev->bus->pm->base.complete) {
+		pm_dev_dbg(dev, state, "completing ");
+		dev->bus->pm->base.complete(dev);
+	}
+
+	up(&dev->sem);
+}
+
+/**
+ * dpm_complete - Complete a PM transition for all devices.
+ * @state: PM transition of the system being carried out.
  *
- * Take devices from the dpm_off_list, resume them,
- * and put them on the dpm_locked list.
+ * Execute the ->complete() callbacks for all devices that are not marked
+ * as DPM_ON.
  */
-static void dpm_resume(void)
+static void dpm_complete(pm_message_t state)
 {
+	struct list_head list;
+
+	INIT_LIST_HEAD(&list);
 	mutex_lock(&dpm_list_mtx);
-	all_sleeping = false;
-	while(!list_empty(&dpm_off)) {
-		struct list_head *entry = dpm_off.next;
-		struct device *dev = to_device(entry);
+	while (!list_empty(&dpm_list)) {
+		struct device *dev = to_device(dpm_list.prev);
 
-		list_move_tail(entry, &dpm_active);
-		dev->power.sleeping = false;
-		mutex_unlock(&dpm_list_mtx);
-		resume_device(dev);
-		mutex_lock(&dpm_list_mtx);
+		get_device(dev);
+		if (dev->power.status > DPM_ON) {
+			dev->power.status = DPM_ON;
+			mutex_unlock(&dpm_list_mtx);
+
+			complete_device(dev, state);
+
+			mutex_lock(&dpm_list_mtx);
+		}
+		if (!list_empty(&dev->power.entry))
+			list_move(&dev->power.entry, &list);
+		put_device(dev);
 	}
+	list_splice(&list, &dpm_list);
 	mutex_unlock(&dpm_list_mtx);
 }
 
 /**
  * device_resume - Restore state of each device in system.
+ * @state: PM transition of the system being carried out.
  *
  * Resume all the devices, unlock them all, and allow new
  * devices to be registered once again.
  */
-void device_resume(void)
+void device_resume(pm_message_t state)
 {
 	might_sleep();
-	dpm_resume();
+	dpm_resume(state);
+	dpm_complete(state);
 }
 EXPORT_SYMBOL_GPL(device_resume);
 
 
 /*------------------------- Suspend routines -------------------------*/
 
-static inline char *suspend_verb(u32 event)
+/**
+ * resume_event - return a PM message representing the resume event
+ * corresponding to given sleep state.
+ * @sleep_state: PM message representing a sleep state.
+ */
+static pm_message_t resume_event(pm_message_t sleep_state)
 {
-	switch (event) {
-	case PM_EVENT_SUSPEND:	return "suspend";
-	case PM_EVENT_FREEZE:	return "freeze";
-	case PM_EVENT_PRETHAW:	return "prethaw";
-	default:		return "(unknown suspend event)";
+	switch (sleep_state.event) {
+	case PM_EVENT_SUSPEND:
+		return PMSG_RESUME;
+	case PM_EVENT_FREEZE:
+	case PM_EVENT_QUIESCE:
+		return PMSG_RECOVER;
+	case PM_EVENT_HIBERNATE:
+		return PMSG_RESTORE;
 	}
-}
-
-static void
-suspend_device_dbg(struct device *dev, pm_message_t state, char *info)
-{
-	dev_dbg(dev, "%s%s%s\n", info, suspend_verb(state.event),
-		((state.event == PM_EVENT_SUSPEND) && device_may_wakeup(dev)) ?
-		", may wakeup" : "");
+	return PMSG_ON;
 }
 
 /**
- * suspend_device_late - Shut down one device (late suspend).
+ * suspend_device_noirq - Shut down one device (late suspend).
  * @dev: Device.
- * @state: Power state device is entering.
+ * @state: PM transition of the system being carried out.
  *
  * This is called with interrupts off and only a single CPU running.
  */
-static int suspend_device_late(struct device *dev, pm_message_t state)
+static int suspend_device_noirq(struct device *dev, pm_message_t state)
 {
 	int error = 0;
 
-	if (dev->bus && dev->bus->suspend_late) {
-		suspend_device_dbg(dev, state, "LATE ");
+	if (!dev->bus)
+		return 0;
+
+	if (dev->bus->pm) {
+		pm_dev_dbg(dev, state, "LATE ");
+		error = pm_noirq_op(dev, dev->bus->pm, state);
+	} else if (dev->bus->suspend_late) {
+		pm_dev_dbg(dev, state, "legacy LATE ");
 		error = dev->bus->suspend_late(dev, state);
 		suspend_report_result(dev->bus->suspend_late, error);
 	}
@@ -278,37 +565,30 @@ static int suspend_device_late(struct device *dev, pm_message_t state)
 
 /**
  * device_power_down - Shut down special devices.
- * @state: Power state to enter.
+ * @state: PM transition of the system being carried out.
  *
- * Power down devices that require interrupts to be disabled
- * and move them from the dpm_off list to the dpm_off_irq list.
+ * Power down devices that require interrupts to be disabled.
  * Then power down system devices.
  *
  * Must be called with interrupts disabled and only one CPU running.
  */
 int device_power_down(pm_message_t state)
 {
+	struct device *dev;
 	int error = 0;
 
-	while (!list_empty(&dpm_off)) {
-		struct list_head *entry = dpm_off.prev;
-		struct device *dev = to_device(entry);
-
-		error = suspend_device_late(dev, state);
+	list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
+		error = suspend_device_noirq(dev, state);
 		if (error) {
-			printk(KERN_ERR "Could not power down device %s: "
-				"error %d\n",
-				kobject_name(&dev->kobj), error);
+			pm_dev_err(dev, state, " late", error);
 			break;
 		}
-		if (!list_empty(&dev->power.entry))
-			list_move(&dev->power.entry, &dpm_off_irq);
+		dev->power.status = DPM_OFF_IRQ;
 	}
-
 	if (!error)
 		error = sysdev_suspend(state);
 	if (error)
-		dpm_power_up();
+		dpm_power_up(resume_event(state));
 	return error;
 }
 EXPORT_SYMBOL_GPL(device_power_down);
@@ -316,7 +596,7 @@ EXPORT_SYMBOL_GPL(device_power_down);
 /**
  * suspend_device - Save state of one device.
  * @dev: Device.
- * @state: Power state device is entering.
+ * @state: PM transition of the system being carried out.
  */
 static int suspend_device(struct device *dev, pm_message_t state)
 {
@@ -324,24 +604,43 @@ static int suspend_device(struct device *dev, pm_message_t state)
 
 	down(&dev->sem);
 
-	if (dev->class && dev->class->suspend) {
-		suspend_device_dbg(dev, state, "class ");
-		error = dev->class->suspend(dev, state);
-		suspend_report_result(dev->class->suspend, error);
+	if (dev->class) {
+		if (dev->class->pm) {
+			pm_dev_dbg(dev, state, "class ");
+			error = pm_op(dev, dev->class->pm, state);
+		} else if (dev->class->suspend) {
+			pm_dev_dbg(dev, state, "legacy class ");
+			error = dev->class->suspend(dev, state);
+			suspend_report_result(dev->class->suspend, error);
+		}
+		if (error)
+			goto End;
 	}
 
-	if (!error && dev->type && dev->type->suspend) {
-		suspend_device_dbg(dev, state, "type ");
-		error = dev->type->suspend(dev, state);
-		suspend_report_result(dev->type->suspend, error);
+	if (dev->type) {
+		if (dev->type->pm) {
+			pm_dev_dbg(dev, state, "type ");
+			error = pm_op(dev, dev->type->pm, state);
+		} else if (dev->type->suspend) {
+			pm_dev_dbg(dev, state, "legacy type ");
+			error = dev->type->suspend(dev, state);
+			suspend_report_result(dev->type->suspend, error);
+		}
+		if (error)
+			goto End;
 	}
 
-	if (!error && dev->bus && dev->bus->suspend) {
-		suspend_device_dbg(dev, state, "");
-		error = dev->bus->suspend(dev, state);
-		suspend_report_result(dev->bus->suspend, error);
+	if (dev->bus) {
+		if (dev->bus->pm) {
+			pm_dev_dbg(dev, state, "");
+			error = pm_op(dev, &dev->bus->pm->base, state);
+		} else if (dev->bus->suspend) {
+			pm_dev_dbg(dev, state, "legacy ");
+			error = dev->bus->suspend(dev, state);
+			suspend_report_result(dev->bus->suspend, error);
+		}
 	}
-
+ End:
 	up(&dev->sem);
 
 	return error;
@@ -349,67 +648,141 @@ static int suspend_device(struct device *dev, pm_message_t state)
 
 /**
  * dpm_suspend - Suspend every device.
- * @state: Power state to put each device in.
+ * @state: PM transition of the system being carried out.
  *
- * Walk the dpm_locked list. Suspend each device and move it
- * to the dpm_off list.
- *
- * (For historical reasons, if it returns -EAGAIN, that used to mean
- * that the device would be called again with interrupts disabled.
- * These days, we use the "suspend_late()" callback for that, so we
- * print a warning and consider it an error).
+ * Execute the appropriate "suspend" callbacks for all devices.
  */
 static int dpm_suspend(pm_message_t state)
 {
+	struct list_head list;
 	int error = 0;
 
+	INIT_LIST_HEAD(&list);
 	mutex_lock(&dpm_list_mtx);
-	while (!list_empty(&dpm_active)) {
-		struct list_head *entry = dpm_active.prev;
-		struct device *dev = to_device(entry);
-
-		WARN_ON(dev->parent && dev->parent->power.sleeping);
+	while (!list_empty(&dpm_list)) {
+		struct device *dev = to_device(dpm_list.prev);
 
-		dev->power.sleeping = true;
+		get_device(dev);
 		mutex_unlock(&dpm_list_mtx);
+
 		error = suspend_device(dev, state);
+
 		mutex_lock(&dpm_list_mtx);
 		if (error) {
-			printk(KERN_ERR "Could not suspend device %s: "
-				"error %d%s\n",
-				kobject_name(&dev->kobj),
-				error,
-				(error == -EAGAIN ?
-				" (please convert to suspend_late)" :
-				""));
-			dev->power.sleeping = false;
+			pm_dev_err(dev, state, "", error);
+			put_device(dev);
 			break;
 		}
+		dev->power.status = DPM_OFF;
 		if (!list_empty(&dev->power.entry))
-			list_move(&dev->power.entry, &dpm_off);
+			list_move(&dev->power.entry, &list);
+		put_device(dev);
 	}
-	if (!error)
-		all_sleeping = true;
+	list_splice(&list, dpm_list.prev);
 	mutex_unlock(&dpm_list_mtx);
+	return error;
+}
+
+/**
+ * prepare_device - Execute the ->prepare() callback(s) for given device.
+ * @dev: Device.
+ * @state: PM transition of the system being carried out.
+ */
+static int prepare_device(struct device *dev, pm_message_t state)
+{
+	int error = 0;
+
+	down(&dev->sem);
+
+	if (dev->bus && dev->bus->pm && dev->bus->pm->base.prepare) {
+		pm_dev_dbg(dev, state, "preparing ");
+		error = dev->bus->pm->base.prepare(dev);
+		suspend_report_result(dev->bus->pm->base.prepare, error);
+		if (error)
+			goto End;
+	}
+
+	if (dev->type && dev->type->pm && dev->type->pm->prepare) {
+		pm_dev_dbg(dev, state, "preparing type ");
+		error = dev->type->pm->prepare(dev);
+		suspend_report_result(dev->type->pm->prepare, error);
+		if (error)
+			goto End;
+	}
+
+	if (dev->class && dev->class->pm && dev->class->pm->prepare) {
+		pm_dev_dbg(dev, state, "preparing class ");
+		error = dev->class->pm->prepare(dev);
+		suspend_report_result(dev->class->pm->prepare, error);
+	}
+ End:
+	up(&dev->sem);
+
+	return error;
+}
 
+/**
+ * dpm_prepare - Prepare all devices for a PM transition.
+ * @state: PM transition of the system being carried out.
+ *
+ * Execute the ->prepare() callback for all devices.
+ */
+static int dpm_prepare(pm_message_t state)
+{
+	struct list_head list;
+	int error = 0;
+
+	INIT_LIST_HEAD(&list);
+	mutex_lock(&dpm_list_mtx);
+	transition_started = true;
+	while (!list_empty(&dpm_list)) {
+		struct device *dev = to_device(dpm_list.next);
+
+		get_device(dev);
+		dev->power.status = DPM_PREPARING;
+		mutex_unlock(&dpm_list_mtx);
+
+		error = prepare_device(dev, state);
+
+		mutex_lock(&dpm_list_mtx);
+		if (error) {
+			dev->power.status = DPM_ON;
+			if (error == -EAGAIN) {
+				put_device(dev);
+				continue;
+			}
+			printk(KERN_ERR "PM: Failed to prepare device %s "
+				"for power transition: error %d\n",
+				kobject_name(&dev->kobj), error);
+			put_device(dev);
+			break;
+		}
+		dev->power.status = DPM_SUSPENDING;
+		if (!list_empty(&dev->power.entry))
+			list_move_tail(&dev->power.entry, &list);
+		put_device(dev);
+	}
+	list_splice(&list, &dpm_list);
+	mutex_unlock(&dpm_list_mtx);
 	return error;
 }
 
 /**
  * device_suspend - Save state and stop all devices in system.
- * @state: new power management state
+ * @state: PM transition of the system being carried out.
  *
- * Prevent new devices from being registered, then lock all devices
- * and suspend them.
+ * Prepare and suspend all devices.
  */
 int device_suspend(pm_message_t state)
 {
 	int error;
 
 	might_sleep();
-	error = dpm_suspend(state);
+	error = dpm_prepare(state);
+	if (!error)
+		error = dpm_suspend(state);
 	if (error)
-		device_resume();
+		device_resume(resume_event(state));
 	return error;
 }
 EXPORT_SYMBOL_GPL(device_suspend);
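
Note (not part of the commit): the struct pm_ops and struct pm_ext_ops types that pm_op() and pm_noirq_op() dispatch through are declared in include/linux/pm.h and are not shown in this diff. As a rough illustration only, a bus driver converting away from the legacy ->suspend()/->resume()/->suspend_late()/->resume_early() methods might populate the new callbacks along these lines; the foo_* names are hypothetical and the field layout (an embedded base plus *_noirq members) is inferred from the accesses visible in main.c above.

	#include <linux/device.h>
	#include <linux/pm.h>

	/* Hypothetical example, not part of this patch. */
	static int foo_prepare(struct device *dev)		{ return 0; }
	static void foo_complete(struct device *dev)		{ }
	static int foo_suspend(struct device *dev)		{ return 0; }
	static int foo_suspend_noirq(struct device *dev)	{ return 0; }
	static int foo_resume_noirq(struct device *dev)		{ return 0; }
	static int foo_resume(struct device *dev)		{ return 0; }

	static struct pm_ext_ops foo_bus_pm_ops = {
		.base = {
			.prepare  = foo_prepare,	/* run by dpm_prepare() */
			.complete = foo_complete,	/* run by dpm_complete() */
			.suspend  = foo_suspend,	/* dispatched via pm_op() */
			.resume   = foo_resume,
		},
		.suspend_noirq = foo_suspend_noirq,	/* dispatched via pm_noirq_op() */
		.resume_noirq  = foo_resume_noirq,
	};

	static struct bus_type foo_bus_type = {
		.name	= "foo",
		.pm	= &foo_bus_pm_ops,	/* checked before the legacy methods */
	};

If ->pm is left NULL, the code above falls back to the legacy per-bus, per-type and per-class methods, so existing drivers keep working unchanged.
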
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index a6894f2a4b99..a3252c0e2887 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -4,7 +4,7 @@
  * main.c
  */
 
-extern struct list_head dpm_active;	/* The active device list */
+extern struct list_head dpm_list;	/* The active device list */
 
 static inline struct device *to_device(struct list_head *entry)
 {
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index 2b4b392dcbc1..8c1e656b5f8b 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -188,9 +188,9 @@ static int show_file_hash(unsigned int value)
 static int show_dev_hash(unsigned int value)
 {
 	int match = 0;
-	struct list_head * entry = dpm_active.prev;
+	struct list_head *entry = dpm_list.prev;
 
-	while (entry != &dpm_active) {
+	while (entry != &dpm_list) {
 		struct device * dev = to_device(entry);
 		unsigned int hash = hash_string(DEVSEED, dev->bus_id, DEVHASH);
 		if (hash == value) {
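
Note (not part of the commit): the reworked entry points are meant to be called by the platform suspend code roughly in the order sketched below; error handling and the real platform sleep hooks are omitted, and the bare local_irq_disable()/local_irq_enable() pair stands in for the arch-specific code that actually runs this phase with interrupts off.

	#include <linux/pm.h>
	#include <linux/interrupt.h>

	/* Rough sketch of one suspend/resume cycle through the new entry points. */
	static int example_suspend_cycle(void)
	{
		int error;

		/* dpm_prepare() + dpm_suspend(): ->prepare(), then ->suspend() */
		error = device_suspend(PMSG_SUSPEND);
		if (error)
			return error;

		local_irq_disable();
		/* suspend_device_noirq() for each device, then sysdev_suspend() */
		error = device_power_down(PMSG_SUSPEND);
		if (!error) {
			/* ... the platform would enter the sleep state here ... */

			/* sysdev_resume() + dpm_power_up(): the "noirq" resume */
			device_power_up(PMSG_RESUME);
		}
		local_irq_enable();

		/* dpm_resume() + dpm_complete(): ->resume(), then ->complete() */
		device_resume(PMSG_RESUME);
		return error;
	}
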