-rw-r--r--	arch/x86/kernel/apm_32.c   |   8
-rw-r--r--	drivers/base/power/main.c  | 675
-rw-r--r--	drivers/base/power/power.h |   2
-rw-r--r--	drivers/base/power/trace.c |   4
-rw-r--r--	include/linux/device.h     |   9
-rw-r--r--	include/linux/pm.h         | 314
-rw-r--r--	kernel/power/disk.c        |  22
-rw-r--r--	kernel/power/main.c        |   6
8 files changed, 845 insertions(+), 195 deletions(-)
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index bf9290e29013..c1735f61a2c0 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -1211,9 +1211,9 @@ static int suspend(int vetoable)
 	if (err != APM_SUCCESS)
 		apm_error("suspend", err);
 	err = (err == APM_SUCCESS) ? 0 : -EIO;
-	device_power_up();
+	device_power_up(PMSG_RESUME);
 	local_irq_enable();
-	device_resume();
+	device_resume(PMSG_RESUME);
 	queue_event(APM_NORMAL_RESUME, NULL);
 	spin_lock(&user_list_lock);
 	for (as = user_list; as != NULL; as = as->next) {
@@ -1238,7 +1238,7 @@ static void standby(void)
 		apm_error("standby", err);

 	local_irq_disable();
-	device_power_up();
+	device_power_up(PMSG_RESUME);
 	local_irq_enable();
 }

@@ -1324,7 +1324,7 @@ static void check_events(void)
 			ignore_bounce = 1;
 			if ((event != APM_NORMAL_RESUME)
 			    || (ignore_normal_resume == 0)) {
-				device_resume();
+				device_resume(PMSG_RESUME);
 				queue_event(event, NULL);
 			}
 			ignore_normal_resume = 0;
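
Each call site above now passes an explicit pm_message_t. For reference, PMSG_RESUME is one of the message initializers this patch adds in include/linux/pm.h (see the end of this diff); the following minimal sketch, assuming only what the patch itself defines, shows what the APM code is effectively passing:

	/* As added by this patch in include/linux/pm.h: a pm_message_t is
	 * just a wrapped event code, and PMSG_RESUME selects PM_EVENT_RESUME. */
	typedef struct pm_message {
		int event;
	} pm_message_t;

	#define PM_EVENT_RESUME	0x0010
	#define PMSG_RESUME	((struct pm_message){ .event = PM_EVENT_RESUME, })

	/* So the resume path above expands, in effect, to: */
	device_power_up((struct pm_message){ .event = PM_EVENT_RESUME, });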
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 45cc3d9eacb8..d571204aaff7 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -12,11 +12,9 @@
  * and add it to the list of power-controlled devices. sysfs entries for
  * controlling device power management will also be added.
  *
- * A different set of lists than the global subsystem list are used to
- * keep track of power info because we use different lists to hold
- * devices based on what stage of the power management process they
- * are in. The power domain dependencies may also differ from the
- * ancestral dependencies that the subsystem list maintains.
+ * A separate list is used for keeping track of power info, because the power
+ * domain dependencies may differ from the ancestral dependencies that the
+ * subsystem list maintains.
 */

 #include <linux/device.h>
@@ -30,31 +28,40 @@
 #include "power.h"

 /*
- * The entries in the dpm_active list are in a depth first order, simply
+ * The entries in the dpm_list list are in a depth first order, simply
  * because children are guaranteed to be discovered after parents, and
  * are inserted at the back of the list on discovery.
  *
- * All the other lists are kept in the same order, for consistency.
- * However the lists aren't always traversed in the same order.
- * Semaphores must be acquired from the top (i.e., front) down
- * and released in the opposite order.  Devices must be suspended
- * from the bottom (i.e., end) up and resumed in the opposite order.
- * That way no parent will be suspended while it still has an active
- * child.
- *
  * Since device_pm_add() may be called with a device semaphore held,
  * we must never try to acquire a device semaphore while holding
  * dpm_list_mutex.
  */

-LIST_HEAD(dpm_active);
-static LIST_HEAD(dpm_off);
-static LIST_HEAD(dpm_off_irq);
+LIST_HEAD(dpm_list);

 static DEFINE_MUTEX(dpm_list_mtx);

-/* 'true' if all devices have been suspended, protected by dpm_list_mtx */
-static bool all_sleeping;
+/*
+ * Set once the preparation of devices for a PM transition has started, reset
+ * before starting to resume devices.  Protected by dpm_list_mtx.
+ */
+static bool transition_started;
+
+/**
+ *	device_pm_lock - lock the list of active devices used by the PM core
+ */
+void device_pm_lock(void)
+{
+	mutex_lock(&dpm_list_mtx);
+}
+
+/**
+ *	device_pm_unlock - unlock the list of active devices used by the PM core
+ */
+void device_pm_unlock(void)
+{
+	mutex_unlock(&dpm_list_mtx);
+}

 /**
  *	device_pm_add - add a device to the list of active devices
@@ -68,17 +75,25 @@ int device_pm_add(struct device *dev)
 		 dev->bus ? dev->bus->name : "No Bus",
 		 kobject_name(&dev->kobj));
 	mutex_lock(&dpm_list_mtx);
-	if ((dev->parent && dev->parent->power.sleeping) || all_sleeping) {
-		if (dev->parent->power.sleeping)
-			dev_warn(dev, "parent %s is sleeping\n",
+	if (dev->parent) {
+		if (dev->parent->power.status >= DPM_SUSPENDING) {
+			dev_warn(dev, "parent %s is sleeping, will not add\n",
 				dev->parent->bus_id);
-		else
-			dev_warn(dev, "all devices are sleeping\n");
+			WARN_ON(true);
+		}
+	} else if (transition_started) {
+		/*
+		 * We refuse to register parentless devices while a PM
+		 * transition is in progress in order to avoid leaving them
+		 * unhandled down the road
+		 */
 		WARN_ON(true);
 	}
 	error = dpm_sysfs_add(dev);
-	if (!error)
-		list_add_tail(&dev->power.entry, &dpm_active);
+	if (!error) {
+		dev->power.status = DPM_ON;
+		list_add_tail(&dev->power.entry, &dpm_list);
+	}
 	mutex_unlock(&dpm_list_mtx);
 	return error;
 }
@@ -100,73 +115,243 @@ void device_pm_remove(struct device *dev)
 	mutex_unlock(&dpm_list_mtx);
 }

+/**
+ *	pm_op - execute the PM operation appropriate for given PM event
+ *	@dev:	Device.
+ *	@ops:	PM operations to choose from.
+ *	@state: PM transition of the system being carried out.
+ */
+static int pm_op(struct device *dev, struct pm_ops *ops, pm_message_t state)
+{
+	int error = 0;
+
+	switch (state.event) {
+#ifdef CONFIG_SUSPEND
+	case PM_EVENT_SUSPEND:
+		if (ops->suspend) {
+			error = ops->suspend(dev);
+			suspend_report_result(ops->suspend, error);
+		}
+		break;
+	case PM_EVENT_RESUME:
+		if (ops->resume) {
+			error = ops->resume(dev);
+			suspend_report_result(ops->resume, error);
+		}
+		break;
+#endif /* CONFIG_SUSPEND */
+#ifdef CONFIG_HIBERNATION
+	case PM_EVENT_FREEZE:
+	case PM_EVENT_QUIESCE:
+		if (ops->freeze) {
+			error = ops->freeze(dev);
+			suspend_report_result(ops->freeze, error);
+		}
+		break;
+	case PM_EVENT_HIBERNATE:
+		if (ops->poweroff) {
+			error = ops->poweroff(dev);
+			suspend_report_result(ops->poweroff, error);
+		}
+		break;
+	case PM_EVENT_THAW:
+	case PM_EVENT_RECOVER:
+		if (ops->thaw) {
+			error = ops->thaw(dev);
+			suspend_report_result(ops->thaw, error);
+		}
+		break;
+	case PM_EVENT_RESTORE:
+		if (ops->restore) {
+			error = ops->restore(dev);
+			suspend_report_result(ops->restore, error);
+		}
+		break;
+#endif /* CONFIG_HIBERNATION */
+	default:
+		error = -EINVAL;
+	}
+	return error;
+}
+
+/**
+ *	pm_noirq_op - execute the PM operation appropriate for given PM event
+ *	@dev:	Device.
+ *	@ops:	PM operations to choose from.
+ *	@state: PM transition of the system being carried out.
+ *
+ *	The operation is executed with interrupts disabled by the only remaining
+ *	functional CPU in the system.
+ */
+static int pm_noirq_op(struct device *dev, struct pm_ext_ops *ops,
+			pm_message_t state)
+{
+	int error = 0;
+
+	switch (state.event) {
+#ifdef CONFIG_SUSPEND
+	case PM_EVENT_SUSPEND:
+		if (ops->suspend_noirq) {
+			error = ops->suspend_noirq(dev);
+			suspend_report_result(ops->suspend_noirq, error);
+		}
+		break;
+	case PM_EVENT_RESUME:
+		if (ops->resume_noirq) {
+			error = ops->resume_noirq(dev);
+			suspend_report_result(ops->resume_noirq, error);
+		}
+		break;
+#endif /* CONFIG_SUSPEND */
+#ifdef CONFIG_HIBERNATION
+	case PM_EVENT_FREEZE:
+	case PM_EVENT_QUIESCE:
+		if (ops->freeze_noirq) {
+			error = ops->freeze_noirq(dev);
+			suspend_report_result(ops->freeze_noirq, error);
+		}
+		break;
+	case PM_EVENT_HIBERNATE:
+		if (ops->poweroff_noirq) {
+			error = ops->poweroff_noirq(dev);
+			suspend_report_result(ops->poweroff_noirq, error);
+		}
+		break;
+	case PM_EVENT_THAW:
+	case PM_EVENT_RECOVER:
+		if (ops->thaw_noirq) {
+			error = ops->thaw_noirq(dev);
+			suspend_report_result(ops->thaw_noirq, error);
+		}
+		break;
+	case PM_EVENT_RESTORE:
+		if (ops->restore_noirq) {
+			error = ops->restore_noirq(dev);
+			suspend_report_result(ops->restore_noirq, error);
+		}
+		break;
+#endif /* CONFIG_HIBERNATION */
+	default:
+		error = -EINVAL;
+	}
+	return error;
+}
+
+static char *pm_verb(int event)
+{
+	switch (event) {
+	case PM_EVENT_SUSPEND:
+		return "suspend";
+	case PM_EVENT_RESUME:
+		return "resume";
+	case PM_EVENT_FREEZE:
+		return "freeze";
+	case PM_EVENT_QUIESCE:
+		return "quiesce";
+	case PM_EVENT_HIBERNATE:
+		return "hibernate";
+	case PM_EVENT_THAW:
+		return "thaw";
+	case PM_EVENT_RESTORE:
+		return "restore";
+	case PM_EVENT_RECOVER:
+		return "recover";
+	default:
+		return "(unknown PM event)";
+	}
+}
+
+static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
+{
+	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
+		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
+		", may wakeup" : "");
+}
+
+static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
+			int error)
+{
+	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
+		kobject_name(&dev->kobj), pm_verb(state.event), info, error);
+}
+
 /*------------------------- Resume routines -------------------------*/

 /**
- *	resume_device_early - Power on one device (early resume).
+ *	resume_device_noirq - Power on one device (early resume).
  *	@dev:	Device.
+ *	@state: PM transition of the system being carried out.
  *
  *	Must be called with interrupts disabled.
  */
-static int resume_device_early(struct device *dev)
+static int resume_device_noirq(struct device *dev, pm_message_t state)
 {
 	int error = 0;

 	TRACE_DEVICE(dev);
 	TRACE_RESUME(0);

-	if (dev->bus && dev->bus->resume_early) {
-		dev_dbg(dev, "EARLY resume\n");
+	if (!dev->bus)
+		goto End;
+
+	if (dev->bus->pm) {
+		pm_dev_dbg(dev, state, "EARLY ");
+		error = pm_noirq_op(dev, dev->bus->pm, state);
+	} else if (dev->bus->resume_early) {
+		pm_dev_dbg(dev, state, "legacy EARLY ");
 		error = dev->bus->resume_early(dev);
 	}
-
+ End:
 	TRACE_RESUME(error);
 	return error;
 }

 /**
  *	dpm_power_up - Power on all regular (non-sysdev) devices.
+ *	@state: PM transition of the system being carried out.
  *
- *	Walk the dpm_off_irq list and power each device up. This
- *	is used for devices that required they be powered down with
- *	interrupts disabled. As devices are powered on, they are moved
- *	to the dpm_off list.
+ *	Execute the appropriate "noirq resume" callback for all devices marked
+ *	as DPM_OFF_IRQ.
  *
  *	Must be called with interrupts disabled and only one CPU running.
  */
-static void dpm_power_up(void)
+static void dpm_power_up(pm_message_t state)
 {
+	struct device *dev;

-	while (!list_empty(&dpm_off_irq)) {
-		struct list_head *entry = dpm_off_irq.next;
-		struct device *dev = to_device(entry);
+	list_for_each_entry(dev, &dpm_list, power.entry)
+		if (dev->power.status > DPM_OFF) {
+			int error;

-		list_move_tail(entry, &dpm_off);
-		resume_device_early(dev);
-	}
+			dev->power.status = DPM_OFF;
+			error = resume_device_noirq(dev, state);
+			if (error)
+				pm_dev_err(dev, state, " early", error);
+		}
 }

 /**
  *	device_power_up - Turn on all devices that need special attention.
+ *	@state: PM transition of the system being carried out.
  *
  *	Power on system devices, then devices that required we shut them down
  *	with interrupts disabled.
  *
  *	Must be called with interrupts disabled.
  */
-void device_power_up(void)
+void device_power_up(pm_message_t state)
 {
 	sysdev_resume();
-	dpm_power_up();
+	dpm_power_up(state);
 }
 EXPORT_SYMBOL_GPL(device_power_up);

 /**
  *	resume_device - Restore state for one device.
  *	@dev:	Device.
- *
+ *	@state: PM transition of the system being carried out.
  */
-static int resume_device(struct device *dev)
+static int resume_device(struct device *dev, pm_message_t state)
 {
 	int error = 0;

@@ -175,21 +360,40 @@ static int resume_device(struct device *dev)

 	down(&dev->sem);

-	if (dev->bus && dev->bus->resume) {
-		dev_dbg(dev,"resuming\n");
-		error = dev->bus->resume(dev);
+	if (dev->bus) {
+		if (dev->bus->pm) {
+			pm_dev_dbg(dev, state, "");
+			error = pm_op(dev, &dev->bus->pm->base, state);
+		} else if (dev->bus->resume) {
+			pm_dev_dbg(dev, state, "legacy ");
+			error = dev->bus->resume(dev);
+		}
+		if (error)
+			goto End;
 	}

-	if (!error && dev->type && dev->type->resume) {
-		dev_dbg(dev,"resuming\n");
-		error = dev->type->resume(dev);
+	if (dev->type) {
+		if (dev->type->pm) {
+			pm_dev_dbg(dev, state, "type ");
+			error = pm_op(dev, dev->type->pm, state);
+		} else if (dev->type->resume) {
+			pm_dev_dbg(dev, state, "legacy type ");
+			error = dev->type->resume(dev);
+		}
+		if (error)
+			goto End;
 	}

-	if (!error && dev->class && dev->class->resume) {
-		dev_dbg(dev,"class resume\n");
-		error = dev->class->resume(dev);
+	if (dev->class) {
+		if (dev->class->pm) {
+			pm_dev_dbg(dev, state, "class ");
+			error = pm_op(dev, dev->class->pm, state);
+		} else if (dev->class->resume) {
+			pm_dev_dbg(dev, state, "legacy class ");
+			error = dev->class->resume(dev);
+		}
 	}
-
+ End:
 	up(&dev->sem);

 	TRACE_RESUME(error);
@@ -198,78 +402,161 @@ static int resume_device(struct device *dev)

 /**
  *	dpm_resume - Resume every device.
+ *	@state: PM transition of the system being carried out.
  *
- *	Resume the devices that have either not gone through
- *	the late suspend, or that did go through it but also
- *	went through the early resume.
+ *	Execute the appropriate "resume" callback for all devices the status of
+ *	which indicates that they are inactive.
+ */
+static void dpm_resume(pm_message_t state)
+{
+	struct list_head list;
+
+	INIT_LIST_HEAD(&list);
+	mutex_lock(&dpm_list_mtx);
+	transition_started = false;
+	while (!list_empty(&dpm_list)) {
+		struct device *dev = to_device(dpm_list.next);
+
+		get_device(dev);
+		if (dev->power.status >= DPM_OFF) {
+			int error;
+
+			dev->power.status = DPM_RESUMING;
+			mutex_unlock(&dpm_list_mtx);
+
+			error = resume_device(dev, state);
+
+			mutex_lock(&dpm_list_mtx);
+			if (error)
+				pm_dev_err(dev, state, "", error);
+		} else if (dev->power.status == DPM_SUSPENDING) {
+			/* Allow new children of the device to be registered */
+			dev->power.status = DPM_RESUMING;
+		}
+		if (!list_empty(&dev->power.entry))
+			list_move_tail(&dev->power.entry, &list);
+		put_device(dev);
+	}
+	list_splice(&list, &dpm_list);
+	mutex_unlock(&dpm_list_mtx);
+}
+
+/**
+ *	complete_device - Complete a PM transition for given device
+ *	@dev:	Device.
+ *	@state: PM transition of the system being carried out.
+ */
+static void complete_device(struct device *dev, pm_message_t state)
+{
+	down(&dev->sem);
+
+	if (dev->class && dev->class->pm && dev->class->pm->complete) {
+		pm_dev_dbg(dev, state, "completing class ");
+		dev->class->pm->complete(dev);
+	}
+
+	if (dev->type && dev->type->pm && dev->type->pm->complete) {
+		pm_dev_dbg(dev, state, "completing type ");
+		dev->type->pm->complete(dev);
+	}
+
+	if (dev->bus && dev->bus->pm && dev->bus->pm->base.complete) {
+		pm_dev_dbg(dev, state, "completing ");
+		dev->bus->pm->base.complete(dev);
+	}
+
+	up(&dev->sem);
+}
+
+/**
+ *	dpm_complete - Complete a PM transition for all devices.
+ *	@state: PM transition of the system being carried out.
  *
- *	Take devices from the dpm_off_list, resume them,
- *	and put them on the dpm_locked list.
+ *	Execute the ->complete() callbacks for all devices that are not marked
+ *	as DPM_ON.
  */
-static void dpm_resume(void)
+static void dpm_complete(pm_message_t state)
 {
+	struct list_head list;
+
+	INIT_LIST_HEAD(&list);
 	mutex_lock(&dpm_list_mtx);
-	all_sleeping = false;
-	while(!list_empty(&dpm_off)) {
-		struct list_head *entry = dpm_off.next;
-		struct device *dev = to_device(entry);
+	while (!list_empty(&dpm_list)) {
+		struct device *dev = to_device(dpm_list.prev);

-		list_move_tail(entry, &dpm_active);
-		dev->power.sleeping = false;
-		mutex_unlock(&dpm_list_mtx);
-		resume_device(dev);
-		mutex_lock(&dpm_list_mtx);
+		get_device(dev);
+		if (dev->power.status > DPM_ON) {
+			dev->power.status = DPM_ON;
+			mutex_unlock(&dpm_list_mtx);
+
+			complete_device(dev, state);
+
+			mutex_lock(&dpm_list_mtx);
+		}
+		if (!list_empty(&dev->power.entry))
+			list_move(&dev->power.entry, &list);
+		put_device(dev);
 	}
+	list_splice(&list, &dpm_list);
 	mutex_unlock(&dpm_list_mtx);
 }

 /**
  *	device_resume - Restore state of each device in system.
+ *	@state: PM transition of the system being carried out.
  *
  *	Resume all the devices, unlock them all, and allow new
  *	devices to be registered once again.
  */
-void device_resume(void)
+void device_resume(pm_message_t state)
 {
 	might_sleep();
-	dpm_resume();
+	dpm_resume(state);
+	dpm_complete(state);
 }
 EXPORT_SYMBOL_GPL(device_resume);


 /*------------------------- Suspend routines -------------------------*/

-static inline char *suspend_verb(u32 event)
+/**
+ *	resume_event - return a PM message representing the resume event
+ *	corresponding to given sleep state.
+ *	@sleep_state: PM message representing a sleep state.
+ */
+static pm_message_t resume_event(pm_message_t sleep_state)
 {
-	switch (event) {
-	case PM_EVENT_SUSPEND:	return "suspend";
-	case PM_EVENT_FREEZE:	return "freeze";
-	case PM_EVENT_PRETHAW:	return "prethaw";
-	default:		return "(unknown suspend event)";
+	switch (sleep_state.event) {
+	case PM_EVENT_SUSPEND:
+		return PMSG_RESUME;
+	case PM_EVENT_FREEZE:
+	case PM_EVENT_QUIESCE:
+		return PMSG_RECOVER;
+	case PM_EVENT_HIBERNATE:
+		return PMSG_RESTORE;
 	}
-}
-
-static void
-suspend_device_dbg(struct device *dev, pm_message_t state, char *info)
-{
-	dev_dbg(dev, "%s%s%s\n", info, suspend_verb(state.event),
-		((state.event == PM_EVENT_SUSPEND) && device_may_wakeup(dev)) ?
-		", may wakeup" : "");
+	return PMSG_ON;
 }

 /**
- *	suspend_device_late - Shut down one device (late suspend).
+ *	suspend_device_noirq - Shut down one device (late suspend).
  *	@dev:	Device.
- *	@state:	Power state device is entering.
+ *	@state: PM transition of the system being carried out.
  *
  *	This is called with interrupts off and only a single CPU running.
  */
-static int suspend_device_late(struct device *dev, pm_message_t state)
+static int suspend_device_noirq(struct device *dev, pm_message_t state)
 {
 	int error = 0;

-	if (dev->bus && dev->bus->suspend_late) {
-		suspend_device_dbg(dev, state, "LATE ");
+	if (!dev->bus)
+		return 0;
+
+	if (dev->bus->pm) {
+		pm_dev_dbg(dev, state, "LATE ");
+		error = pm_noirq_op(dev, dev->bus->pm, state);
+	} else if (dev->bus->suspend_late) {
+		pm_dev_dbg(dev, state, "legacy LATE ");
 		error = dev->bus->suspend_late(dev, state);
 		suspend_report_result(dev->bus->suspend_late, error);
 	}
@@ -278,37 +565,30 @@ static int suspend_device_late(struct device *dev, pm_message_t state)

 /**
  *	device_power_down - Shut down special devices.
- *	@state:	Power state to enter.
+ *	@state: PM transition of the system being carried out.
  *
- *	Power down devices that require interrupts to be disabled
- *	and move them from the dpm_off list to the dpm_off_irq list.
+ *	Power down devices that require interrupts to be disabled.
  *	Then power down system devices.
  *
  *	Must be called with interrupts disabled and only one CPU running.
  */
 int device_power_down(pm_message_t state)
 {
+	struct device *dev;
 	int error = 0;

-	while (!list_empty(&dpm_off)) {
-		struct list_head *entry = dpm_off.prev;
-		struct device *dev = to_device(entry);
-
-		error = suspend_device_late(dev, state);
+	list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
+		error = suspend_device_noirq(dev, state);
 		if (error) {
-			printk(KERN_ERR "Could not power down device %s: "
-				"error %d\n",
-				kobject_name(&dev->kobj), error);
+			pm_dev_err(dev, state, " late", error);
 			break;
 		}
-		if (!list_empty(&dev->power.entry))
-			list_move(&dev->power.entry, &dpm_off_irq);
+		dev->power.status = DPM_OFF_IRQ;
 	}
-
 	if (!error)
 		error = sysdev_suspend(state);
 	if (error)
-		dpm_power_up();
+		dpm_power_up(resume_event(state));
 	return error;
 }
 EXPORT_SYMBOL_GPL(device_power_down);
@@ -316,7 +596,7 @@ EXPORT_SYMBOL_GPL(device_power_down);
 /**
  *	suspend_device - Save state of one device.
  *	@dev:	Device.
- *	@state:	Power state device is entering.
+ *	@state: PM transition of the system being carried out.
  */
 static int suspend_device(struct device *dev, pm_message_t state)
 {
@@ -324,24 +604,43 @@ static int suspend_device(struct device *dev, pm_message_t state)

 	down(&dev->sem);

-	if (dev->class && dev->class->suspend) {
-		suspend_device_dbg(dev, state, "class ");
-		error = dev->class->suspend(dev, state);
-		suspend_report_result(dev->class->suspend, error);
+	if (dev->class) {
+		if (dev->class->pm) {
+			pm_dev_dbg(dev, state, "class ");
+			error = pm_op(dev, dev->class->pm, state);
+		} else if (dev->class->suspend) {
+			pm_dev_dbg(dev, state, "legacy class ");
+			error = dev->class->suspend(dev, state);
+			suspend_report_result(dev->class->suspend, error);
+		}
+		if (error)
+			goto End;
 	}

-	if (!error && dev->type && dev->type->suspend) {
-		suspend_device_dbg(dev, state, "type ");
-		error = dev->type->suspend(dev, state);
-		suspend_report_result(dev->type->suspend, error);
+	if (dev->type) {
+		if (dev->type->pm) {
+			pm_dev_dbg(dev, state, "type ");
+			error = pm_op(dev, dev->type->pm, state);
+		} else if (dev->type->suspend) {
+			pm_dev_dbg(dev, state, "legacy type ");
+			error = dev->type->suspend(dev, state);
+			suspend_report_result(dev->type->suspend, error);
+		}
+		if (error)
+			goto End;
 	}

-	if (!error && dev->bus && dev->bus->suspend) {
-		suspend_device_dbg(dev, state, "");
-		error = dev->bus->suspend(dev, state);
-		suspend_report_result(dev->bus->suspend, error);
+	if (dev->bus) {
+		if (dev->bus->pm) {
+			pm_dev_dbg(dev, state, "");
+			error = pm_op(dev, &dev->bus->pm->base, state);
+		} else if (dev->bus->suspend) {
+			pm_dev_dbg(dev, state, "legacy ");
+			error = dev->bus->suspend(dev, state);
+			suspend_report_result(dev->bus->suspend, error);
+		}
 	}
-
+ End:
 	up(&dev->sem);

 	return error;
@@ -349,67 +648,141 @@ static int suspend_device(struct device *dev, pm_message_t state)

 /**
  *	dpm_suspend - Suspend every device.
- *	@state:	Power state to put each device in.
+ *	@state: PM transition of the system being carried out.
  *
- *	Walk the dpm_locked list.  Suspend each device and move it
- *	to the dpm_off list.
- *
- *	(For historical reasons, if it returns -EAGAIN, that used to mean
- *	that the device would be called again with interrupts disabled.
- *	These days, we use the "suspend_late()" callback for that, so we
- *	print a warning and consider it an error).
+ *	Execute the appropriate "suspend" callbacks for all devices.
  */
 static int dpm_suspend(pm_message_t state)
 {
+	struct list_head list;
 	int error = 0;

+	INIT_LIST_HEAD(&list);
 	mutex_lock(&dpm_list_mtx);
-	while (!list_empty(&dpm_active)) {
-		struct list_head *entry = dpm_active.prev;
-		struct device *dev = to_device(entry);
-
-		WARN_ON(dev->parent && dev->parent->power.sleeping);
+	while (!list_empty(&dpm_list)) {
+		struct device *dev = to_device(dpm_list.prev);

-		dev->power.sleeping = true;
+		get_device(dev);
 		mutex_unlock(&dpm_list_mtx);
+
 		error = suspend_device(dev, state);
+
 		mutex_lock(&dpm_list_mtx);
 		if (error) {
-			printk(KERN_ERR "Could not suspend device %s: "
-				"error %d%s\n",
-				kobject_name(&dev->kobj),
-				error,
-				(error == -EAGAIN ?
-				" (please convert to suspend_late)" :
-				""));
-			dev->power.sleeping = false;
+			pm_dev_err(dev, state, "", error);
+			put_device(dev);
 			break;
 		}
+		dev->power.status = DPM_OFF;
 		if (!list_empty(&dev->power.entry))
-			list_move(&dev->power.entry, &dpm_off);
+			list_move(&dev->power.entry, &list);
+		put_device(dev);
 	}
-	if (!error)
-		all_sleeping = true;
+	list_splice(&list, dpm_list.prev);
 	mutex_unlock(&dpm_list_mtx);
+	return error;
+}
+
+/**
+ *	prepare_device - Execute the ->prepare() callback(s) for given device.
+ *	@dev:	Device.
+ *	@state: PM transition of the system being carried out.
+ */
+static int prepare_device(struct device *dev, pm_message_t state)
+{
+	int error = 0;
+
+	down(&dev->sem);
+
+	if (dev->bus && dev->bus->pm && dev->bus->pm->base.prepare) {
+		pm_dev_dbg(dev, state, "preparing ");
+		error = dev->bus->pm->base.prepare(dev);
+		suspend_report_result(dev->bus->pm->base.prepare, error);
+		if (error)
+			goto End;
+	}
+
+	if (dev->type && dev->type->pm && dev->type->pm->prepare) {
+		pm_dev_dbg(dev, state, "preparing type ");
+		error = dev->type->pm->prepare(dev);
+		suspend_report_result(dev->type->pm->prepare, error);
+		if (error)
+			goto End;
+	}
+
+	if (dev->class && dev->class->pm && dev->class->pm->prepare) {
+		pm_dev_dbg(dev, state, "preparing class ");
+		error = dev->class->pm->prepare(dev);
+		suspend_report_result(dev->class->pm->prepare, error);
+	}
+ End:
+	up(&dev->sem);
+
+	return error;
+}

+/**
+ *	dpm_prepare - Prepare all devices for a PM transition.
+ *	@state: PM transition of the system being carried out.
+ *
+ *	Execute the ->prepare() callback for all devices.
+ */
+static int dpm_prepare(pm_message_t state)
+{
+	struct list_head list;
+	int error = 0;
+
+	INIT_LIST_HEAD(&list);
+	mutex_lock(&dpm_list_mtx);
+	transition_started = true;
+	while (!list_empty(&dpm_list)) {
+		struct device *dev = to_device(dpm_list.next);
+
+		get_device(dev);
+		dev->power.status = DPM_PREPARING;
+		mutex_unlock(&dpm_list_mtx);
+
+		error = prepare_device(dev, state);
+
+		mutex_lock(&dpm_list_mtx);
+		if (error) {
+			dev->power.status = DPM_ON;
+			if (error == -EAGAIN) {
+				put_device(dev);
+				continue;
+			}
+			printk(KERN_ERR "PM: Failed to prepare device %s "
+				"for power transition: error %d\n",
+				kobject_name(&dev->kobj), error);
+			put_device(dev);
+			break;
+		}
+		dev->power.status = DPM_SUSPENDING;
+		if (!list_empty(&dev->power.entry))
+			list_move_tail(&dev->power.entry, &list);
+		put_device(dev);
+	}
+	list_splice(&list, &dpm_list);
+	mutex_unlock(&dpm_list_mtx);
 	return error;
 }

 /**
  *	device_suspend - Save state and stop all devices in system.
- *	@state: new power management state
+ *	@state: PM transition of the system being carried out.
  *
- *	Prevent new devices from being registered, then lock all devices
- *	and suspend them.
+ *	Prepare and suspend all devices.
  */
 int device_suspend(pm_message_t state)
 {
 	int error;

 	might_sleep();
-	error = dpm_suspend(state);
+	error = dpm_prepare(state);
+	if (!error)
+		error = dpm_suspend(state);
 	if (error)
-		device_resume();
+		device_resume(resume_event(state));
 	return error;
 }
 EXPORT_SYMBOL_GPL(device_suspend);
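
To see how the dispatch in suspend_device()/resume_device() and the _noirq paths plays out, here is a hypothetical bus opting in to the new callbacks; all foo_* names are invented for illustration and are not part of this patch. Because ->pm is non-NULL, pm_op() and pm_noirq_op() are used and any legacy ->suspend()/->resume() methods are ignored:

	#include <linux/device.h>
	#include <linux/pm.h>

	/* Hypothetical bus-side wiring; foo_* identifiers are illustrative. */
	static int foo_bus_suspend(struct device *dev)
	{
		/* Reached via pm_op() when state.event == PM_EVENT_SUSPEND. */
		return 0;
	}

	static int foo_bus_resume(struct device *dev)
	{
		/* Reached via pm_op() when state.event == PM_EVENT_RESUME. */
		return 0;
	}

	static int foo_bus_suspend_noirq(struct device *dev)
	{
		/* Reached via pm_noirq_op() from device_power_down(), with
		 * interrupts disabled on the one remaining CPU. */
		return 0;
	}

	static struct pm_ext_ops foo_bus_pm_ops = {
		.base = {
			.suspend	= foo_bus_suspend,
			.resume		= foo_bus_resume,
		},
		.suspend_noirq	= foo_bus_suspend_noirq,
	};

	static struct bus_type foo_bus_type = {
		.name	= "foo",
		.pm	= &foo_bus_pm_ops,	/* new member added to struct bus_type below */
	};

Leaving a callback NULL is fine: pm_op() and pm_noirq_op() only invoke the handlers that are actually present.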
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index a6894f2a4b99..a3252c0e2887 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -4,7 +4,7 @@
  * main.c
 */

-extern struct list_head dpm_active;	/* The active device list */
+extern struct list_head dpm_list;	/* The active device list */

 static inline struct device *to_device(struct list_head *entry)
 {
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index 2b4b392dcbc1..8c1e656b5f8b 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -188,9 +188,9 @@ static int show_file_hash(unsigned int value)
 static int show_dev_hash(unsigned int value)
 {
 	int match = 0;
-	struct list_head * entry = dpm_active.prev;
+	struct list_head *entry = dpm_list.prev;

-	while (entry != &dpm_active) {
+	while (entry != &dpm_list) {
 		struct device * dev = to_device(entry);
 		unsigned int hash = hash_string(DEVSEED, dev->bus_id, DEVHASH);
 		if (hash == value) {
diff --git a/include/linux/device.h b/include/linux/device.h
index 6a2d04c011bc..f71a78d123ae 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -68,6 +68,8 @@ struct bus_type {
 	int (*resume_early)(struct device *dev);
 	int (*resume)(struct device *dev);

+	struct pm_ext_ops *pm;
+
 	struct bus_type_private *p;
 };

@@ -131,6 +133,8 @@ struct device_driver {
 	int (*resume) (struct device *dev);
 	struct attribute_group **groups;

+	struct pm_ops *pm;
+
 	struct driver_private *p;
 };

@@ -197,6 +201,8 @@ struct class {

 	int (*suspend)(struct device *dev, pm_message_t state);
 	int (*resume)(struct device *dev);
+
+	struct pm_ops *pm;
 };

 extern int __must_check class_register(struct class *class);

@@ -248,8 +254,11 @@ struct device_type {
 	struct attribute_group **groups;
 	int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
 	void (*release)(struct device *dev);
+
 	int (*suspend)(struct device *dev, pm_message_t state);
 	int (*resume)(struct device *dev);
+
+	struct pm_ops *pm;
 };

 /* interface for exporting device attributes */
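
Note that struct bus_type takes the extended struct pm_ext_ops while struct device_driver, struct class, and struct device_type take plain struct pm_ops: in this patch only the bus-level callbacks are invoked during the noirq phase. A hypothetical class hooking the new prepare/complete pair (bar_* names invented for illustration, not from this patch):

	#include <linux/device.h>
	#include <linux/pm.h>

	/* Hypothetical class-side wiring; bar_* identifiers are illustrative. */
	static int bar_prepare(struct device *dev)
	{
		/* After this succeeds, no new children of dev may be registered
		 * until the matching bar_complete() runs. */
		return 0;
	}

	static void bar_complete(struct device *dev)
	{
		/* Undo whatever bar_prepare() set up. */
	}

	static struct pm_ops bar_class_pm_ops = {
		.prepare	= bar_prepare,
		.complete	= bar_complete,
	};

	static struct class bar_class = {
		.name	= "bar",
		.pm	= &bar_class_pm_ops,
	};

Ordering follows drivers/base/power/main.c above: suspend_device() consults class, then type, then bus, and resume_device() walks the three in the opposite order, so class callbacks run first on the way down and last on the way up.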
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 39a7ee859b67..4ad9de94449a 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -112,7 +112,9 @@ typedef struct pm_message {
 	int event;
 } pm_message_t;

-/*
+/**
+ * struct pm_ops - device PM callbacks
+ *
  * Several driver power state transitions are externally visible, affecting
  * the state of pending I/O queues and (for drivers that touch hardware)
  * interrupts, wakeups, DMA, and other hardware state.  There may also be
@@ -120,6 +122,284 @@ typedef struct pm_message { | |||
120 | * to the rest of the driver stack (such as a driver that's ON gating off | 122 | * to the rest of the driver stack (such as a driver that's ON gating off |
121 | * clocks which are not in active use). | 123 | * clocks which are not in active use). |
122 | * | 124 | * |
125 | * The externally visible transitions are handled with the help of the following | ||
126 | * callbacks included in this structure: | ||
127 | * | ||
128 | * @prepare: Prepare the device for the upcoming transition, but do NOT change | ||
129 | * its hardware state. Prevent new children of the device from being | ||
130 | * registered after @prepare() returns (the driver's subsystem and | ||
131 | * generally the rest of the kernel is supposed to prevent new calls to the | ||
132 | * probe method from being made too once @prepare() has succeeded). If | ||
133 | * @prepare() detects a situation it cannot handle (e.g. registration of a | ||
134 | * child already in progress), it may return -EAGAIN, so that the PM core | ||
135 | * can execute it once again (e.g. after the new child has been registered) | ||
136 | * to recover from the race condition. This method is executed for all | ||
137 | * kinds of suspend transitions and is followed by one of the suspend | ||
138 | * callbacks: @suspend(), @freeze(), or @poweroff(). | ||
139 | * The PM core executes @prepare() for all devices before starting to | ||
140 | * execute suspend callbacks for any of them, so drivers may assume all of | ||
141 | * the other devices to be present and functional while @prepare() is being | ||
142 | * executed. In particular, it is safe to make GFP_KERNEL memory | ||
143 | * allocations from within @prepare(). However, drivers may NOT assume | ||
144 | * anything about the availability of the user space at that time and it | ||
145 | * is not correct to request firmware from within @prepare() (it's too | ||
146 | * late to do that). [To work around this limitation, drivers may | ||
147 | * register suspend and hibernation notifiers that are executed before the | ||
148 | * freezing of tasks.] | ||
149 | * | ||
150 | * @complete: Undo the changes made by @prepare(). This method is executed for | ||
151 | * all kinds of resume transitions, following one of the resume callbacks: | ||
152 | * @resume(), @thaw(), @restore(). Also called if the state transition | ||
153 | * fails before the driver's suspend callback (@suspend(), @freeze(), | ||
154 | * @poweroff()) can be executed (e.g. if the suspend callback fails for one | ||
155 | * of the other devices that the PM core has unsuccessfully attempted to | ||
156 | * suspend earlier). | ||
157 | * The PM core executes @complete() after it has executed the appropriate | ||
158 | * resume callback for all devices. | ||
159 | * | ||
160 | * @suspend: Executed before putting the system into a sleep state in which the | ||
161 | * contents of main memory are preserved. Quiesce the device, put it into | ||
162 | * a low power state appropriate for the upcoming system state (such as | ||
163 | * PCI_D3hot), and enable wakeup events as appropriate. | ||
164 | * | ||
165 | * @resume: Executed after waking the system up from a sleep state in which the | ||
166 | * contents of main memory were preserved. Put the device into the | ||
167 | * appropriate state, according to the information saved in memory by the | ||
168 | * preceding @suspend(). The driver starts working again, responding to | ||
169 | * hardware events and software requests. The hardware may have gone | ||
170 | * through a power-off reset, or it may have maintained state from the | ||
171 | * previous suspend() which the driver may rely on while resuming. On most | ||
172 | * platforms, there are no restrictions on availability of resources like | ||
173 | * clocks during @resume(). | ||
174 | * | ||
175 | * @freeze: Hibernation-specific, executed before creating a hibernation image. | ||
176 | * Quiesce operations so that a consistent image can be created, but do NOT | ||
177 | * otherwise put the device into a low power device state and do NOT emit | ||
178 | * system wakeup events. Save in main memory the device settings to be | ||
179 | * used by @restore() during the subsequent resume from hibernation or by | ||
180 | * the subsequent @thaw(), if the creation of the image or the restoration | ||
181 | * of main memory contents from it fails. | ||
182 | * | ||
183 | * @thaw: Hibernation-specific, executed after creating a hibernation image OR | ||
184 | * if the creation of the image fails. Also executed after a failing | ||
185 | * attempt to restore the contents of main memory from such an image. | ||
186 | * Undo the changes made by the preceding @freeze(), so the device can be | ||
187 | * operated in the same way as immediately before the call to @freeze(). | ||
188 | * | ||
189 | * @poweroff: Hibernation-specific, executed after saving a hibernation image. | ||
190 | * Quiesce the device, put it into a low power state appropriate for the | ||
191 | * upcoming system state (such as PCI_D3hot), and enable wakeup events as | ||
192 | * appropriate. | ||
193 | * | ||
194 | * @restore: Hibernation-specific, executed after restoring the contents of main | ||
195 | * memory from a hibernation image. Driver starts working again, | ||
196 | * responding to hardware events and software requests. Drivers may NOT | ||
197 | * make ANY assumptions about the hardware state right prior to @restore(). | ||
198 | * On most platforms, there are no restrictions on availability of | ||
199 | * resources like clocks during @restore(). | ||
200 | * | ||
201 | * All of the above callbacks, except for @complete(), return error codes. | ||
202 | * However, the error codes returned by the resume operations, @resume(), | ||
203 | * @thaw(), and @restore(), do not cause the PM core to abort the resume | ||
204 | * transition during which they are returned. The error codes returned in | ||
205 | * that cases are only printed by the PM core to the system logs for debugging | ||
206 | * purposes. Still, it is recommended that drivers only return error codes | ||
207 | * from their resume methods in case of an unrecoverable failure (i.e. when the | ||
208 | * device being handled refuses to resume and becomes unusable) to allow us to | ||
209 | * modify the PM core in the future, so that it can avoid attempting to handle | ||
210 | * devices that failed to resume and their children. | ||
211 | * | ||
212 | * It is allowed to unregister devices while the above callbacks are being | ||
213 | * executed. However, it is not allowed to unregister a device from within any | ||
214 | * of its own callbacks. | ||
215 | */ | ||
216 | |||
217 | struct pm_ops { | ||
218 | int (*prepare)(struct device *dev); | ||
219 | void (*complete)(struct device *dev); | ||
220 | int (*suspend)(struct device *dev); | ||
221 | int (*resume)(struct device *dev); | ||
222 | int (*freeze)(struct device *dev); | ||
223 | int (*thaw)(struct device *dev); | ||
224 | int (*poweroff)(struct device *dev); | ||
225 | int (*restore)(struct device *dev); | ||
226 | }; | ||
227 | |||
228 | /** | ||
229 | * struct pm_ext_ops - extended device PM callbacks | ||
230 | * | ||
231 | * Some devices require certain operations related to suspend and hibernation | ||
232 | * to be carried out with interrupts disabled. Thus, 'struct pm_ext_ops' below | ||
233 | * is defined, adding callbacks to be executed with interrupts disabled to | ||
234 | * 'struct pm_ops'. | ||
235 | * | ||
236 | * The following callbacks included in 'struct pm_ext_ops' are executed with | ||
237 | * the nonboot CPUs switched off and with interrupts disabled on the only | ||
238 | * functional CPU. They also are executed with the PM core list of devices | ||
239 | * locked, so they must NOT unregister any devices. | ||
240 | * | ||
241 | * @suspend_noirq: Complete the operations of ->suspend() by carrying out any | ||
242 | * actions required for suspending the device that need interrupts to be | ||
243 | * disabled | ||
244 | * | ||
245 | * @resume_noirq: Prepare for the execution of ->resume() by carrying out any | ||
246 | * actions required for resuming the device that need interrupts to be | ||
247 | * disabled | ||
248 | * | ||
249 | * @freeze_noirq: Complete the operations of ->freeze() by carrying out any | ||
250 | * actions required for freezing the device that need interrupts to be | ||
251 | * disabled | ||
252 | * | ||
253 | * @thaw_noirq: Prepare for the execution of ->thaw() by carrying out any | ||
254 | * actions required for thawing the device that need interrupts to be | ||
255 | * disabled | ||
256 | * | ||
257 | * @poweroff_noirq: Complete the operations of ->poweroff() by carrying out any | ||
258 | * actions required for handling the device that need interrupts to be | ||
259 | * disabled | ||
260 | * | ||
261 | * @restore_noirq: Prepare for the execution of ->restore() by carrying out any | ||
262 | * actions required for restoring the operations of the device that need | ||
263 | * interrupts to be disabled | ||
264 | * | ||
265 | * All of the above callbacks return error codes, but the error codes returned | ||
266 | * by the resume operations, @resume_noirq(), @thaw_noirq(), and | ||
267 | * @restore_noirq(), do not cause the PM core to abort the resume transition | ||
268 | * during which they are returned. The error codes returned in that cases are | ||
269 | * only printed by the PM core to the system logs for debugging purposes. | ||
270 | * Still, as stated above, it is recommended that drivers only return error | ||
271 | * codes from their resume methods if the device being handled fails to resume | ||
272 | * and is not usable any more. | ||
273 | */ | ||
274 | |||
275 | struct pm_ext_ops { | ||
276 | struct pm_ops base; | ||
277 | int (*suspend_noirq)(struct device *dev); | ||
278 | int (*resume_noirq)(struct device *dev); | ||
279 | int (*freeze_noirq)(struct device *dev); | ||
280 | int (*thaw_noirq)(struct device *dev); | ||
281 | int (*poweroff_noirq)(struct device *dev); | ||
282 | int (*restore_noirq)(struct device *dev); | ||
283 | }; | ||
284 | |||
285 | /** | ||
286 | * PM_EVENT_ messages | ||
287 | * | ||
288 | * The following PM_EVENT_ messages are defined for the internal use of the PM | ||
289 | * core, in order to provide a mechanism allowing the high level suspend and | ||
290 | * hibernation code to convey the necessary information to the device PM core | ||
291 | * code: | ||
292 | * | ||
293 | * ON No transition. | ||
294 | * | ||
295 | * FREEZE System is going to hibernate, call ->prepare() and ->freeze() | ||
296 | * for all devices. | ||
297 | * | ||
298 | * SUSPEND System is going to suspend, call ->prepare() and ->suspend() | ||
299 | * for all devices. | ||
300 | * | ||
301 | * HIBERNATE Hibernation image has been saved, call ->prepare() and | ||
302 | * ->poweroff() for all devices. | ||
303 | * | ||
304 | * QUIESCE Contents of main memory are going to be restored from a (loaded) | ||
305 | * hibernation image, call ->prepare() and ->freeze() for all | ||
306 | * devices. | ||
307 | * | ||
308 | * RESUME System is resuming, call ->resume() and ->complete() for all | ||
309 | * devices. | ||
310 | * | ||
311 | * THAW Hibernation image has been created, call ->thaw() and | ||
312 | * ->complete() for all devices. | ||
313 | * | ||
314 | * RESTORE Contents of main memory have been restored from a hibernation | ||
315 | * image, call ->restore() and ->complete() for all devices. | ||
316 | * | ||
317 | * RECOVER Creation of a hibernation image or restoration of the main | ||
318 | * memory contents from a hibernation image has failed, call | ||
319 | * ->thaw() and ->complete() for all devices. | ||
320 | */ | ||
321 | |||
322 | #define PM_EVENT_ON 0x0000 | ||
323 | #define PM_EVENT_FREEZE 0x0001 | ||
324 | #define PM_EVENT_SUSPEND 0x0002 | ||
325 | #define PM_EVENT_HIBERNATE 0x0004 | ||
326 | #define PM_EVENT_QUIESCE 0x0008 | ||
327 | #define PM_EVENT_RESUME 0x0010 | ||
328 | #define PM_EVENT_THAW 0x0020 | ||
329 | #define PM_EVENT_RESTORE 0x0040 | ||
330 | #define PM_EVENT_RECOVER 0x0080 | ||
331 | |||
332 | #define PM_EVENT_SLEEP (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE) | ||
333 | |||
334 | #define PMSG_FREEZE ((struct pm_message){ .event = PM_EVENT_FREEZE, }) | ||
335 | #define PMSG_QUIESCE ((struct pm_message){ .event = PM_EVENT_QUIESCE, }) | ||
336 | #define PMSG_SUSPEND ((struct pm_message){ .event = PM_EVENT_SUSPEND, }) | ||
337 | #define PMSG_HIBERNATE ((struct pm_message){ .event = PM_EVENT_HIBERNATE, }) | ||
338 | #define PMSG_RESUME ((struct pm_message){ .event = PM_EVENT_RESUME, }) | ||
339 | #define PMSG_THAW ((struct pm_message){ .event = PM_EVENT_THAW, }) | ||
340 | #define PMSG_RESTORE ((struct pm_message){ .event = PM_EVENT_RESTORE, }) | ||
341 | #define PMSG_RECOVER ((struct pm_message){ .event = PM_EVENT_RECOVER, }) | ||
342 | #define PMSG_ON ((struct pm_message){ .event = PM_EVENT_ON, }) | ||
343 | |||
344 | /** | ||
345 | * Device power management states | ||
346 | * | ||
347 | * These state labels are used internally by the PM core to indicate the current | ||
348 | * status of a device with respect to the PM core operations. | ||
349 | * | ||
350 | * DPM_ON Device is regarded as operational. Set this way | ||
351 | * initially and when ->complete() is about to be called. | ||
352 | * Also set when ->prepare() fails. | ||
353 | * | ||
354 | * DPM_PREPARING Device is going to be prepared for a PM transition. Set | ||
355 | * when ->prepare() is about to be called. | ||
356 | * | ||
357 | * DPM_RESUMING Device is going to be resumed. Set when ->resume(), | ||
358 | * ->thaw(), or ->restore() is about to be called. | ||
359 | * | ||
360 | * DPM_SUSPENDING Device has been prepared for a power transition. Set | ||
361 | * when ->prepare() has just succeeded. | ||
362 | * | ||
363 | * DPM_OFF Device is regarded as inactive. Set immediately after | ||
364 | * ->suspend(), ->freeze(), or ->poweroff() has succeeded. | ||
365 | * Also set when ->resume_noirq(), ->thaw_noirq(), or | ||
366 | * ->restore_noirq() is about to be called. | ||
367 | * | ||
368 | * DPM_OFF_IRQ Device is in a "deep sleep". Set immediately after | ||
369 | * ->suspend_noirq(), ->freeze_noirq(), or | ||
370 | * ->poweroff_noirq() has just succeeded. | ||
371 | */ | ||
372 | |||
373 | enum dpm_state { | ||
374 | DPM_INVALID, | ||
375 | DPM_ON, | ||
376 | DPM_PREPARING, | ||
377 | DPM_RESUMING, | ||
378 | DPM_SUSPENDING, | ||
379 | DPM_OFF, | ||
380 | DPM_OFF_IRQ, | ||
381 | }; | ||
382 | |||
383 | struct dev_pm_info { | ||
384 | pm_message_t power_state; | ||
385 | unsigned can_wakeup:1; | ||
386 | unsigned should_wakeup:1; | ||
387 | enum dpm_state status; /* Owned by the PM core */ | ||
388 | #ifdef CONFIG_PM_SLEEP | ||
389 | struct list_head entry; | ||
390 | #endif | ||
391 | }; | ||
392 | |||
393 | /* | ||
394 | * The PM_EVENT_ messages are also used by drivers implementing the legacy | ||
395 | * suspend framework, based on the ->suspend() and ->resume() callbacks common | ||
396 | * for suspend and hibernation transitions, according to the rules below. | ||
397 | */ | ||
398 | |||
399 | /* Necessary, because several drivers use PM_EVENT_PRETHAW */ | ||
400 | #define PM_EVENT_PRETHAW PM_EVENT_QUIESCE | ||
401 | |||
402 | /* | ||
123 | * One transition is triggered by resume(), after a suspend() call; the | 403 | * One transition is triggered by resume(), after a suspend() call; the |
124 | * message is implicit: | 404 | * message is implicit: |
125 | * | 405 | * |
@@ -164,35 +444,13 @@ typedef struct pm_message { | |||
164 | * or from system low-power states such as standby or suspend-to-RAM. | 444 | * or from system low-power states such as standby or suspend-to-RAM. |
165 | */ | 445 | */ |
166 | 446 | ||
167 | #define PM_EVENT_ON 0 | 447 | #ifdef CONFIG_PM_SLEEP |
168 | #define PM_EVENT_FREEZE 1 | 448 | extern void device_pm_lock(void); |
169 | #define PM_EVENT_SUSPEND 2 | 449 | extern void device_power_up(pm_message_t state); |
170 | #define PM_EVENT_HIBERNATE 4 | 450 | extern void device_resume(pm_message_t state); |
171 | #define PM_EVENT_PRETHAW 8 | ||
172 | |||
173 | #define PM_EVENT_SLEEP (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE) | ||
174 | |||
175 | #define PMSG_FREEZE ((struct pm_message){ .event = PM_EVENT_FREEZE, }) | ||
176 | #define PMSG_PRETHAW ((struct pm_message){ .event = PM_EVENT_PRETHAW, }) | ||
177 | #define PMSG_SUSPEND ((struct pm_message){ .event = PM_EVENT_SUSPEND, }) | ||
178 | #define PMSG_HIBERNATE ((struct pm_message){ .event = PM_EVENT_HIBERNATE, }) | ||
179 | #define PMSG_ON ((struct pm_message){ .event = PM_EVENT_ON, }) | ||
180 | |||
181 | struct dev_pm_info { | ||
182 | pm_message_t power_state; | ||
183 | unsigned can_wakeup:1; | ||
184 | unsigned should_wakeup:1; | ||
185 | bool sleeping:1; /* Owned by the PM core */ | ||
186 | #ifdef CONFIG_PM_SLEEP | ||
187 | struct list_head entry; | ||
188 | #endif | ||
189 | }; | ||
190 | 451 | ||
452 | extern void device_pm_unlock(void); | ||
191 | extern int device_power_down(pm_message_t state); | 453 | extern int device_power_down(pm_message_t state); |
192 | extern void device_power_up(void); | ||
193 | extern void device_resume(void); | ||
194 | |||
195 | #ifdef CONFIG_PM_SLEEP | ||
196 | extern int device_suspend(pm_message_t state); | 454 | extern int device_suspend(pm_message_t state); |
197 | extern int device_prepare_suspend(pm_message_t state); | 455 | extern int device_prepare_suspend(pm_message_t state); |
198 | 456 | ||
diff --git a/kernel/power/disk.c b/kernel/power/disk.c index 14a656cdc652..d416be0efa8a 100644 --- a/kernel/power/disk.c +++ b/kernel/power/disk.c | |||
@@ -193,6 +193,7 @@ static int create_image(int platform_mode) | |||
193 | if (error) | 193 | if (error) |
194 | return error; | 194 | return error; |
195 | 195 | ||
196 | device_pm_lock(); | ||
196 | local_irq_disable(); | 197 | local_irq_disable(); |
197 | /* At this point, device_suspend() has been called, but *not* | 198 | /* At this point, device_suspend() has been called, but *not* |
198 | * device_power_down(). We *must* call device_power_down() now. | 199 | * device_power_down(). We *must* call device_power_down() now. |
@@ -224,9 +225,11 @@ static int create_image(int platform_mode) | |||
224 | /* NOTE: device_power_up() is just a resume() for devices | 225 | /* NOTE: device_power_up() is just a resume() for devices |
225 | * that suspended with irqs off ... no overall powerup. | 226 | * that suspended with irqs off ... no overall powerup. |
226 | */ | 227 | */ |
227 | device_power_up(); | 228 | device_power_up(in_suspend ? |
229 | (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); | ||
228 | Enable_irqs: | 230 | Enable_irqs: |
229 | local_irq_enable(); | 231 | local_irq_enable(); |
232 | device_pm_unlock(); | ||
230 | return error; | 233 | return error; |
231 | } | 234 | } |
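The nested conditional passed to device_power_up() here (and to device_resume() in the next hunk) encodes the three ways control can reach this point; expanded equivalently for clarity:

        pm_message_t msg;

        if (!in_suspend)
                msg = PMSG_RESTORE;     /* control returned via a restored image */
        else if (error)
                msg = PMSG_RECOVER;     /* image creation failed */
        else
                msg = PMSG_THAW;        /* image created; thaw to write it out */

        device_power_up(msg);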
232 | 235 | ||
@@ -280,7 +283,8 @@ int hibernation_snapshot(int platform_mode) | |||
280 | Finish: | 283 | Finish: |
281 | platform_finish(platform_mode); | 284 | platform_finish(platform_mode); |
282 | Resume_devices: | 285 | Resume_devices: |
283 | device_resume(); | 286 | device_resume(in_suspend ? |
287 | (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); | ||
284 | Resume_console: | 288 | Resume_console: |
285 | resume_console(); | 289 | resume_console(); |
286 | Close: | 290 | Close: |
@@ -300,8 +304,9 @@ static int resume_target_kernel(void) | |||
300 | { | 304 | { |
301 | int error; | 305 | int error; |
302 | 306 | ||
307 | device_pm_lock(); | ||
303 | local_irq_disable(); | 308 | local_irq_disable(); |
304 | error = device_power_down(PMSG_PRETHAW); | 309 | error = device_power_down(PMSG_QUIESCE); |
305 | if (error) { | 310 | if (error) { |
306 | printk(KERN_ERR "PM: Some devices failed to power down, " | 311 | printk(KERN_ERR "PM: Some devices failed to power down, " |
307 | "aborting resume\n"); | 312 | "aborting resume\n"); |
@@ -329,9 +334,10 @@ static int resume_target_kernel(void) | |||
329 | swsusp_free(); | 334 | swsusp_free(); |
330 | restore_processor_state(); | 335 | restore_processor_state(); |
331 | touch_softlockup_watchdog(); | 336 | touch_softlockup_watchdog(); |
332 | device_power_up(); | 337 | device_power_up(PMSG_RECOVER); |
333 | Enable_irqs: | 338 | Enable_irqs: |
334 | local_irq_enable(); | 339 | local_irq_enable(); |
340 | device_pm_unlock(); | ||
335 | return error; | 341 | return error; |
336 | } | 342 | } |
337 | 343 | ||
@@ -350,7 +356,7 @@ int hibernation_restore(int platform_mode) | |||
350 | 356 | ||
351 | pm_prepare_console(); | 357 | pm_prepare_console(); |
352 | suspend_console(); | 358 | suspend_console(); |
353 | error = device_suspend(PMSG_PRETHAW); | 359 | error = device_suspend(PMSG_QUIESCE); |
354 | if (error) | 360 | if (error) |
355 | goto Finish; | 361 | goto Finish; |
356 | 362 | ||
@@ -362,7 +368,7 @@ int hibernation_restore(int platform_mode) | |||
362 | enable_nonboot_cpus(); | 368 | enable_nonboot_cpus(); |
363 | } | 369 | } |
364 | platform_restore_cleanup(platform_mode); | 370 | platform_restore_cleanup(platform_mode); |
365 | device_resume(); | 371 | device_resume(PMSG_RECOVER); |
366 | Finish: | 372 | Finish: |
367 | resume_console(); | 373 | resume_console(); |
368 | pm_restore_console(); | 374 | pm_restore_console(); |
@@ -403,6 +409,7 @@ int hibernation_platform_enter(void) | |||
403 | if (error) | 409 | if (error) |
404 | goto Finish; | 410 | goto Finish; |
405 | 411 | ||
412 | device_pm_lock(); | ||
406 | local_irq_disable(); | 413 | local_irq_disable(); |
407 | error = device_power_down(PMSG_HIBERNATE); | 414 | error = device_power_down(PMSG_HIBERNATE); |
408 | if (!error) { | 415 | if (!error) { |
@@ -411,6 +418,7 @@ int hibernation_platform_enter(void) | |||
411 | while (1); | 418 | while (1); |
412 | } | 419 | } |
413 | local_irq_enable(); | 420 | local_irq_enable(); |
421 | device_pm_unlock(); | ||
414 | 422 | ||
415 | /* | 423 | /* |
416 | * We don't need to reenable the nonboot CPUs or resume consoles, since | 424 | * We don't need to reenable the nonboot CPUs or resume consoles, since |
@@ -419,7 +427,7 @@ int hibernation_platform_enter(void) | |||
419 | Finish: | 427 | Finish: |
420 | hibernation_ops->finish(); | 428 | hibernation_ops->finish(); |
421 | Resume_devices: | 429 | Resume_devices: |
422 | device_resume(); | 430 | device_resume(PMSG_RESTORE); |
423 | Resume_console: | 431 | Resume_console: |
424 | resume_console(); | 432 | resume_console(); |
425 | Close: | 433 | Close: |
diff --git a/kernel/power/main.c b/kernel/power/main.c index 6a6d5eb3524e..d023b6b584e5 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c | |||
@@ -228,6 +228,7 @@ static int suspend_enter(suspend_state_t state) | |||
228 | { | 228 | { |
229 | int error = 0; | 229 | int error = 0; |
230 | 230 | ||
231 | device_pm_lock(); | ||
231 | arch_suspend_disable_irqs(); | 232 | arch_suspend_disable_irqs(); |
232 | BUG_ON(!irqs_disabled()); | 233 | BUG_ON(!irqs_disabled()); |
233 | 234 | ||
@@ -239,10 +240,11 @@ static int suspend_enter(suspend_state_t state) | |||
239 | if (!suspend_test(TEST_CORE)) | 240 | if (!suspend_test(TEST_CORE)) |
240 | error = suspend_ops->enter(state); | 241 | error = suspend_ops->enter(state); |
241 | 242 | ||
242 | device_power_up(); | 243 | device_power_up(PMSG_RESUME); |
243 | Done: | 244 | Done: |
244 | arch_suspend_enable_irqs(); | 245 | arch_suspend_enable_irqs(); |
245 | BUG_ON(irqs_disabled()); | 246 | BUG_ON(irqs_disabled()); |
247 | device_pm_unlock(); | ||
246 | return error; | 248 | return error; |
247 | } | 249 | } |
248 | 250 | ||
@@ -291,7 +293,7 @@ int suspend_devices_and_enter(suspend_state_t state) | |||
291 | if (suspend_ops->finish) | 293 | if (suspend_ops->finish) |
292 | suspend_ops->finish(); | 294 | suspend_ops->finish(); |
293 | Resume_devices: | 295 | Resume_devices: |
294 | device_resume(); | 296 | device_resume(PMSG_RESUME); |
295 | Resume_console: | 297 | Resume_console: |
296 | resume_console(); | 298 | resume_console(); |
297 | Close: | 299 | Close: |