Diffstat (limited to 'drivers/base/power')

 drivers/base/power/main.c  | 502
 drivers/base/power/power.h |  12
 2 files changed, 342 insertions(+), 172 deletions(-)
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 691ffb64cc37..200ed5fafd50 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -24,20 +24,45 @@
 #include <linux/mutex.h>
 #include <linux/pm.h>
 #include <linux/resume-trace.h>
+#include <linux/rwsem.h>
 
 #include "../base.h"
 #include "power.h"
 
+/*
+ * The entries in the dpm_active list are in a depth first order, simply
+ * because children are guaranteed to be discovered after parents, and
+ * are inserted at the back of the list on discovery.
+ *
+ * All the other lists are kept in the same order, for consistency.
+ * However the lists aren't always traversed in the same order.
+ * Semaphores must be acquired from the top (i.e., front) down
+ * and released in the opposite order.  Devices must be suspended
+ * from the bottom (i.e., end) up and resumed in the opposite order.
+ * That way no parent will be suspended while it still has an active
+ * child.
+ *
+ * Since device_pm_add() may be called with a device semaphore held,
+ * we must never try to acquire a device semaphore while holding
+ * dpm_list_mutex.
+ */
+
 LIST_HEAD(dpm_active);
+static LIST_HEAD(dpm_locked);
 static LIST_HEAD(dpm_off);
 static LIST_HEAD(dpm_off_irq);
+static LIST_HEAD(dpm_destroy);
 
-static DEFINE_MUTEX(dpm_mtx);
 static DEFINE_MUTEX(dpm_list_mtx);
 
-int (*platform_enable_wakeup)(struct device *dev, int is_on);
+static DECLARE_RWSEM(pm_sleep_rwsem);
 
+int (*platform_enable_wakeup)(struct device *dev, int is_on);
 
+/**
+ * device_pm_add - add a device to the list of active devices
+ * @dev: Device to be added to the list
+ */
 void device_pm_add(struct device *dev)
 {
 	pr_debug("PM: Adding info for %s:%s\n",
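Note: the lock ordering documented in the new comment above can be illustrated with a small sketch (hypothetical helper, not part of this patch): a device semaphore is always taken before dpm_list_mtx, and the two are dropped in the reverse order.

/* Illustrative only, not part of the patch: the documented ordering. */
static void example_lock_ordering(struct device *dev)
{
	down(&dev->sem);		/* device semaphore first */
	mutex_lock(&dpm_list_mtx);	/* list mutex second */

	/* ... manipulate dev->power.entry on one of the dpm_* lists ... */

	mutex_unlock(&dpm_list_mtx);
	up(&dev->sem);
}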
@@ -48,8 +73,36 @@ void device_pm_add(struct device *dev)
 	mutex_unlock(&dpm_list_mtx);
 }
 
+/**
+ * device_pm_remove - remove a device from the list of active devices
+ * @dev: Device to be removed from the list
+ *
+ * This function also removes the device's PM-related sysfs attributes.
+ */
 void device_pm_remove(struct device *dev)
 {
+	/*
+	 * If this function is called during a suspend, it will be blocked,
+	 * because we're holding the device's semaphore at that time, which may
+	 * lead to a deadlock.  In that case we want to print a warning.
+	 * However, it may also be called by unregister_dropped_devices() with
+	 * the device's semaphore released, in which case the warning should
+	 * not be printed.
+	 */
+	if (down_trylock(&dev->sem)) {
+		if (down_read_trylock(&pm_sleep_rwsem)) {
+			/* No suspend in progress, wait on dev->sem */
+			down(&dev->sem);
+			up_read(&pm_sleep_rwsem);
+		} else {
+			/* Suspend in progress, we may deadlock */
+			dev_warn(dev, "Suspicious %s during suspend\n",
+				__FUNCTION__);
+			dump_stack();
+			/* The user has been warned ... */
+			down(&dev->sem);
+		}
+	}
 	pr_debug("PM: Removing info for %s:%s\n",
 		dev->bus ? dev->bus->name : "No Bus",
 		kobject_name(&dev->kobj));
@@ -57,25 +110,124 @@ void device_pm_remove(struct device *dev)
 	dpm_sysfs_remove(dev);
 	list_del_init(&dev->power.entry);
 	mutex_unlock(&dpm_list_mtx);
+	up(&dev->sem);
+}
+
+/**
+ * device_pm_schedule_removal - schedule the removal of a suspended device
+ * @dev: Device to destroy
+ *
+ * Moves the device to the dpm_destroy list for further processing by
+ * unregister_dropped_devices().
+ */
+void device_pm_schedule_removal(struct device *dev)
+{
+	pr_debug("PM: Preparing for removal: %s:%s\n",
+		dev->bus ? dev->bus->name : "No Bus",
+		kobject_name(&dev->kobj));
+	mutex_lock(&dpm_list_mtx);
+	list_move_tail(&dev->power.entry, &dpm_destroy);
+	mutex_unlock(&dpm_list_mtx);
+}
+
+/**
+ * pm_sleep_lock - mutual exclusion for registration and suspend
+ *
+ * Returns 0 if no suspend is underway and device registration
+ * may proceed, otherwise -EBUSY.
+ */
+int pm_sleep_lock(void)
+{
+	if (down_read_trylock(&pm_sleep_rwsem))
+		return 0;
+
+	return -EBUSY;
+}
+
+/**
+ * pm_sleep_unlock - mutual exclusion for registration and suspend
+ *
+ * This routine undoes the effect of device_pm_add_lock
+ * when a device's registration is complete.
+ */
+void pm_sleep_unlock(void)
+{
+	up_read(&pm_sleep_rwsem);
 }
 
 
 /*------------------------- Resume routines -------------------------*/
 
 /**
- * resume_device - Restore state for one device.
+ * resume_device_early - Power on one device (early resume).
  * @dev: Device.
  *
+ * Must be called with interrupts disabled.
  */
-
-static int resume_device(struct device * dev)
+static int resume_device_early(struct device *dev)
 {
 	int error = 0;
 
 	TRACE_DEVICE(dev);
 	TRACE_RESUME(0);
 
-	down(&dev->sem);
+	if (dev->bus && dev->bus->resume_early) {
+		dev_dbg(dev, "EARLY resume\n");
+		error = dev->bus->resume_early(dev);
+	}
+
+	TRACE_RESUME(error);
+	return error;
+}
+
+/**
+ * dpm_power_up - Power on all regular (non-sysdev) devices.
+ *
+ * Walk the dpm_off_irq list and power each device up. This
+ * is used for devices that required they be powered down with
+ * interrupts disabled. As devices are powered on, they are moved
+ * to the dpm_off list.
+ *
+ * Must be called with interrupts disabled and only one CPU running.
+ */
+static void dpm_power_up(void)
+{
+
+	while (!list_empty(&dpm_off_irq)) {
+		struct list_head *entry = dpm_off_irq.next;
+		struct device *dev = to_device(entry);
+
+		list_move_tail(entry, &dpm_off);
+		resume_device_early(dev);
+	}
+}
+
+/**
+ * device_power_up - Turn on all devices that need special attention.
+ *
+ * Power on system devices, then devices that required we shut them down
+ * with interrupts disabled.
+ *
+ * Must be called with interrupts disabled.
+ */
+void device_power_up(void)
+{
+	sysdev_resume();
+	dpm_power_up();
+}
+EXPORT_SYMBOL_GPL(device_power_up);
+
+/**
+ * resume_device - Restore state for one device.
+ * @dev: Device.
+ *
+ */
+static int resume_device(struct device *dev)
+{
+	int error = 0;
+
+	TRACE_DEVICE(dev);
+	TRACE_RESUME(0);
 
 	if (dev->bus && dev->bus->resume) {
 		dev_dbg(dev,"resuming\n");
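Note: as a usage sketch (hypothetical bus, illustrative names only), a bus that must touch hardware before interrupts are re-enabled would provide resume_early() in addition to its ordinary resume() method; device_power_up() invokes the former with interrupts still off, while device_resume() invokes the latter later on.

/* Hypothetical bus methods, illustrative only. */
static int foo_bus_resume_early(struct device *dev)
{
	/* Called via dpm_power_up() with interrupts disabled:
	 * do the minimum needed to make the device reachable. */
	return 0;
}

static int foo_bus_resume(struct device *dev)
{
	/* Called later from device_resume(), with interrupts enabled
	 * and the device semaphore held by the PM core. */
	return 0;
}

static struct bus_type foo_bus_type = {
	.name		= "foo",
	.resume_early	= foo_bus_resume_early,
	.resume		= foo_bus_resume,
};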
@@ -92,126 +244,94 @@ static int resume_device(struct device * dev)
 		error = dev->class->resume(dev);
 	}
 
-	up(&dev->sem);
-
 	TRACE_RESUME(error);
 	return error;
 }
 
-
-static int resume_device_early(struct device * dev)
-{
-	int error = 0;
-
-	TRACE_DEVICE(dev);
-	TRACE_RESUME(0);
-	if (dev->bus && dev->bus->resume_early) {
-		dev_dbg(dev,"EARLY resume\n");
-		error = dev->bus->resume_early(dev);
-	}
-	TRACE_RESUME(error);
-	return error;
-}
-
-/*
- * Resume the devices that have either not gone through
- * the late suspend, or that did go through it but also
- * went through the early resume
+/**
+ * dpm_resume - Resume every device.
+ *
+ * Resume the devices that have either not gone through
+ * the late suspend, or that did go through it but also
+ * went through the early resume.
+ *
+ * Take devices from the dpm_off_list, resume them,
+ * and put them on the dpm_locked list.
  */
 static void dpm_resume(void)
 {
 	mutex_lock(&dpm_list_mtx);
 	while(!list_empty(&dpm_off)) {
-		struct list_head * entry = dpm_off.next;
-		struct device * dev = to_device(entry);
-
-		get_device(dev);
-		list_move_tail(entry, &dpm_active);
+		struct list_head *entry = dpm_off.next;
+		struct device *dev = to_device(entry);
 
+		list_move_tail(entry, &dpm_locked);
 		mutex_unlock(&dpm_list_mtx);
 		resume_device(dev);
 		mutex_lock(&dpm_list_mtx);
-		put_device(dev);
 	}
 	mutex_unlock(&dpm_list_mtx);
 }
 
-
 /**
- * device_resume - Restore state of each device in system.
+ * unlock_all_devices - Release each device's semaphore
  *
- * Walk the dpm_off list, remove each entry, resume the device,
- * then add it to the dpm_active list.
+ * Go through the dpm_off list.  Put each device on the dpm_active
+ * list and unlock it.
  */
-
-void device_resume(void)
+static void unlock_all_devices(void)
 {
-	might_sleep();
-	mutex_lock(&dpm_mtx);
-	dpm_resume();
-	mutex_unlock(&dpm_mtx);
-}
-
-EXPORT_SYMBOL_GPL(device_resume);
+	mutex_lock(&dpm_list_mtx);
+	while (!list_empty(&dpm_locked)) {
+		struct list_head *entry = dpm_locked.prev;
+		struct device *dev = to_device(entry);
 
+		list_move(entry, &dpm_active);
+		up(&dev->sem);
+	}
+	mutex_unlock(&dpm_list_mtx);
+}
 
 /**
- * dpm_power_up - Power on some devices.
- *
- * Walk the dpm_off_irq list and power each device up. This
- * is used for devices that required they be powered down with
- * interrupts disabled. As devices are powered on, they are moved
- * to the dpm_active list.
+ * unregister_dropped_devices - Unregister devices scheduled for removal
  *
- * Interrupts must be disabled when calling this.
+ * Unregister all devices on the dpm_destroy list.
  */
-
-static void dpm_power_up(void)
+static void unregister_dropped_devices(void)
 {
-	while(!list_empty(&dpm_off_irq)) {
-		struct list_head * entry = dpm_off_irq.next;
-		struct device * dev = to_device(entry);
+	mutex_lock(&dpm_list_mtx);
+	while (!list_empty(&dpm_destroy)) {
+		struct list_head *entry = dpm_destroy.next;
+		struct device *dev = to_device(entry);
 
-		list_move_tail(entry, &dpm_off);
-		resume_device_early(dev);
+		up(&dev->sem);
+		mutex_unlock(&dpm_list_mtx);
+		/* This also removes the device from the list */
+		device_unregister(dev);
+		mutex_lock(&dpm_list_mtx);
 	}
+	mutex_unlock(&dpm_list_mtx);
 }
 
-
 /**
- * device_power_up - Turn on all devices that need special attention.
+ * device_resume - Restore state of each device in system.
  *
- * Power on system devices then devices that required we shut them down
- * with interrupts disabled.
- * Called with interrupts disabled.
+ * Resume all the devices, unlock them all, and allow new
+ * devices to be registered once again.
  */
-
-void device_power_up(void)
+void device_resume(void)
 {
-	sysdev_resume();
-	dpm_power_up();
+	might_sleep();
+	dpm_resume();
+	unlock_all_devices();
+	unregister_dropped_devices();
+	up_write(&pm_sleep_rwsem);
 }
-
-EXPORT_SYMBOL_GPL(device_power_up);
+EXPORT_SYMBOL_GPL(device_resume);
 
 
 /*------------------------- Suspend routines -------------------------*/
 
-/*
- * The entries in the dpm_active list are in a depth first order, simply
- * because children are guaranteed to be discovered after parents, and
- * are inserted at the back of the list on discovery.
- *
- * All list on the suspend path are done in reverse order, so we operate
- * on the leaves of the device tree (or forests, depending on how you want
- * to look at it ;) first. As nodes are removed from the back of the list,
- * they are inserted into the front of their destintation lists.
- *
- * Things are the reverse on the resume path - iterations are done in
- * forward order, and nodes are inserted at the back of their destination
- * lists. This way, the ancestors will be accessed before their descendents.
- */
-
 static inline char *suspend_verb(u32 event)
 {
 	switch (event) {
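Note: device_pm_schedule_removal() and unregister_dropped_devices() are meant to pair up: a suspend method that finds a device which cannot survive the transition queues it on dpm_destroy, and device_resume() unregisters it after everything else has been brought back. A hedged sketch of such a caller (hypothetical helpers, illustrative only):

/* Illustrative only; foo_can_survive_sleep() and foo_save_state()
 * are assumed helpers, not part of this patch. */
static int foo_bus_suspend(struct device *dev, pm_message_t state)
{
	if (!foo_can_survive_sleep(dev)) {
		/* Queued on dpm_destroy; unregistered by device_resume()
		 * via unregister_dropped_devices(). */
		device_pm_schedule_removal(dev);
		return 0;
	}
	return foo_save_state(dev);
}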
@@ -222,7 +342,6 @@ static inline char *suspend_verb(u32 event)
 	}
 }
 
-
 static void
 suspend_device_dbg(struct device *dev, pm_message_t state, char *info)
 {
@@ -232,16 +351,73 @@ suspend_device_dbg(struct device *dev, pm_message_t state, char *info)
 }
 
 /**
- * suspend_device - Save state of one device.
+ * suspend_device_late - Shut down one device (late suspend).
  * @dev: Device.
  * @state: Power state device is entering.
+ *
+ * This is called with interrupts off and only a single CPU running.
  */
+static int suspend_device_late(struct device *dev, pm_message_t state)
+{
+	int error = 0;
 
-static int suspend_device(struct device * dev, pm_message_t state)
+	if (dev->bus && dev->bus->suspend_late) {
+		suspend_device_dbg(dev, state, "LATE ");
+		error = dev->bus->suspend_late(dev, state);
+		suspend_report_result(dev->bus->suspend_late, error);
+	}
+	return error;
+}
+
+/**
+ * device_power_down - Shut down special devices.
+ * @state: Power state to enter.
+ *
+ * Power down devices that require interrupts to be disabled
+ * and move them from the dpm_off list to the dpm_off_irq list.
+ * Then power down system devices.
+ *
+ * Must be called with interrupts disabled and only one CPU running.
+ */
+int device_power_down(pm_message_t state)
+{
+	int error = 0;
+
+	while (!list_empty(&dpm_off)) {
+		struct list_head *entry = dpm_off.prev;
+		struct device *dev = to_device(entry);
+
+		list_del_init(&dev->power.entry);
+		error = suspend_device_late(dev, state);
+		if (error) {
+			printk(KERN_ERR "Could not power down device %s: "
+				"error %d\n",
+				kobject_name(&dev->kobj), error);
+			if (list_empty(&dev->power.entry))
+				list_add(&dev->power.entry, &dpm_off);
+			break;
+		}
+		if (list_empty(&dev->power.entry))
+			list_add(&dev->power.entry, &dpm_off_irq);
+	}
+
+	if (!error)
+		error = sysdev_suspend(state);
+	if (error)
+		dpm_power_up();
+	return error;
+}
+EXPORT_SYMBOL_GPL(device_power_down);
+
+/**
+ * suspend_device - Save state of one device.
+ * @dev: Device.
+ * @state: Power state device is entering.
+ */
+int suspend_device(struct device *dev, pm_message_t state)
 {
 	int error = 0;
 
-	down(&dev->sem);
 	if (dev->power.power_state.event) {
 		dev_dbg(dev, "PM: suspend %d-->%d\n",
 			dev->power.power_state.event, state.event);
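Note: the matching suspend-side sketch for the hypothetical bus above: suspend_late() runs from device_power_down() with interrupts off and only one CPU running, so it must not sleep or take any semaphore or mutex.

/* Hypothetical bus method, illustrative only. */
static int foo_bus_suspend_late(struct device *dev, pm_message_t state)
{
	/* Interrupts are off here: no sleeping, no locks,
	 * just quiesce the hardware (assumed helper). */
	foo_quiesce_hw(dev);
	return 0;
}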
@@ -264,123 +440,105 @@ static int suspend_device(struct device * dev, pm_message_t state)
 		error = dev->bus->suspend(dev, state);
 		suspend_report_result(dev->bus->suspend, error);
 	}
-	up(&dev->sem);
-	return error;
-}
-
-
-/*
- * This is called with interrupts off, only a single CPU
- * running. We can't acquire a mutex or semaphore (and we don't
- * need the protection)
- */
-static int suspend_device_late(struct device *dev, pm_message_t state)
-{
-	int error = 0;
-
-	if (dev->bus && dev->bus->suspend_late) {
-		suspend_device_dbg(dev, state, "LATE ");
-		error = dev->bus->suspend_late(dev, state);
-		suspend_report_result(dev->bus->suspend_late, error);
-	}
 	return error;
 }
 
 /**
- * device_suspend - Save state and stop all devices in system.
+ * dpm_suspend - Suspend every device.
  * @state: Power state to put each device in.
  *
- * Walk the dpm_active list, call ->suspend() for each device, and move
- * it to the dpm_off list.
+ * Walk the dpm_locked list.  Suspend each device and move it
+ * to the dpm_off list.
  *
 * (For historical reasons, if it returns -EAGAIN, that used to mean
 * that the device would be called again with interrupts disabled.
 * These days, we use the "suspend_late()" callback for that, so we
 * print a warning and consider it an error).
- *
- * If we get a different error, try and back out.
- *
- * If we hit a failure with any of the devices, call device_resume()
- * above to bring the suspended devices back to life.
- *
  */
-
-int device_suspend(pm_message_t state)
+static int dpm_suspend(pm_message_t state)
 {
 	int error = 0;
 
-	might_sleep();
-	mutex_lock(&dpm_mtx);
 	mutex_lock(&dpm_list_mtx);
-	while (!list_empty(&dpm_active) && error == 0) {
-		struct list_head * entry = dpm_active.prev;
-		struct device * dev = to_device(entry);
+	while (!list_empty(&dpm_locked)) {
+		struct list_head *entry = dpm_locked.prev;
+		struct device *dev = to_device(entry);
 
-		get_device(dev);
+		list_del_init(&dev->power.entry);
 		mutex_unlock(&dpm_list_mtx);
-
 		error = suspend_device(dev, state);
-
-		mutex_lock(&dpm_list_mtx);
-
-		/* Check if the device got removed */
-		if (!list_empty(&dev->power.entry)) {
-			/* Move it to the dpm_off list */
-			if (!error)
-				list_move(&dev->power.entry, &dpm_off);
-		}
-		if (error)
+		if (error) {
 			printk(KERN_ERR "Could not suspend device %s: "
 				"error %d%s\n",
-				kobject_name(&dev->kobj), error,
-				error == -EAGAIN ? " (please convert to suspend_late)" : "");
-		put_device(dev);
+				kobject_name(&dev->kobj),
+				error,
+				(error == -EAGAIN ?
+				" (please convert to suspend_late)" :
+				""));
+			mutex_lock(&dpm_list_mtx);
+			if (list_empty(&dev->power.entry))
+				list_add(&dev->power.entry, &dpm_locked);
+			mutex_unlock(&dpm_list_mtx);
+			break;
+		}
+		mutex_lock(&dpm_list_mtx);
+		if (list_empty(&dev->power.entry))
+			list_add(&dev->power.entry, &dpm_off);
 	}
 	mutex_unlock(&dpm_list_mtx);
-	if (error)
-		dpm_resume();
 
-	mutex_unlock(&dpm_mtx);
 	return error;
 }
 
-EXPORT_SYMBOL_GPL(device_suspend);
-
 /**
- * device_power_down - Shut down special devices.
- * @state: Power state to enter.
+ * lock_all_devices - Acquire every device's semaphore
  *
- * Walk the dpm_off_irq list, calling ->power_down() for each device that
- * couldn't power down the device with interrupts enabled. When we're
- * done, power down system devices.
+ * Go through the dpm_active list.  Carefully lock each device's
+ * semaphore and put it in on the dpm_locked list.
  */
-
-int device_power_down(pm_message_t state)
+static void lock_all_devices(void)
 {
-	int error = 0;
-	struct device * dev;
+	mutex_lock(&dpm_list_mtx);
+	while (!list_empty(&dpm_active)) {
+		struct list_head *entry = dpm_active.next;
+		struct device *dev = to_device(entry);
 
-	while (!list_empty(&dpm_off)) {
-		struct list_head * entry = dpm_off.prev;
+		/* Required locking order is dev->sem first,
+		 * then dpm_list_mutex.  Hence this awkward code.
+		 */
+		get_device(dev);
+		mutex_unlock(&dpm_list_mtx);
+		down(&dev->sem);
+		mutex_lock(&dpm_list_mtx);
 
-		dev = to_device(entry);
-		error = suspend_device_late(dev, state);
-		if (error)
-			goto Error;
-		list_move(&dev->power.entry, &dpm_off_irq);
+		if (list_empty(entry))
+			up(&dev->sem);		/* Device was removed */
+		else
+			list_move_tail(entry, &dpm_locked);
+		put_device(dev);
 	}
+	mutex_unlock(&dpm_list_mtx);
+}
+
+/**
+ * device_suspend - Save state and stop all devices in system.
+ *
+ * Prevent new devices from being registered, then lock all devices
+ * and suspend them.
+ */
+int device_suspend(pm_message_t state)
+{
+	int error;
 
-	error = sysdev_suspend(state);
-Done:
+	might_sleep();
+	down_write(&pm_sleep_rwsem);
+	lock_all_devices();
+	error = dpm_suspend(state);
+	if (error)
+		device_resume();
 	return error;
-Error:
-	printk(KERN_ERR "Could not power down device %s: "
-		"error %d\n", kobject_name(&dev->kobj), error);
-	dpm_power_up();
-	goto Done;
 }
-
-EXPORT_SYMBOL_GPL(device_power_down);
+EXPORT_SYMBOL_GPL(device_suspend);
 
 void __suspend_report_result(const char *function, void *fn, int ret)
 {
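Note: the call sequence expected from the platform sleep code is unchanged by this patch; only the locking behind it is new. Roughly (a sketch, error handling trimmed):

/* Rough ordering sketch, not literal kernel code. */
error = device_suspend(PMSG_SUSPEND);	/* write-locks pm_sleep_rwsem,
					 * locks and suspends every device */
if (!error)
	error = device_power_down(PMSG_SUSPEND); /* IRQs off: suspend_late + sysdevs */

/* ... the platform enters the sleep state and later wakes up ... */

device_power_up();	/* IRQs still off: sysdevs + resume_early */
device_resume();	/* resumes, unlocks, releases pm_sleep_rwsem */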
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index 379da4e958e0..10c20840395e 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -20,6 +20,9 @@ static inline struct device *to_device(struct list_head *entry)
 
 extern void device_pm_add(struct device *);
 extern void device_pm_remove(struct device *);
+extern void device_pm_schedule_removal(struct device *);
+extern int pm_sleep_lock(void);
+extern void pm_sleep_unlock(void);
 
 #else /* CONFIG_PM_SLEEP */
 
@@ -32,6 +35,15 @@ static inline void device_pm_remove(struct device *dev)
 {
 }
 
+static inline int pm_sleep_lock(void)
+{
+	return 0;
+}
+
+static inline void pm_sleep_unlock(void)
+{
+}
+
 #endif
 
 #ifdef CONFIG_PM
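Note: pm_sleep_lock()/pm_sleep_unlock() are intended for the registration path in the driver core; a hedged sketch of a caller (illustrative, not part of this patch) would refuse to add a device while a sleep transition holds pm_sleep_rwsem for writing:

/* Illustrative only. */
int foo_register_device(struct device *dev)
{
	int error;

	if (pm_sleep_lock())		/* returns -EBUSY during suspend */
		return -EBUSY;

	error = device_add(dev);	/* the real registration work */

	pm_sleep_unlock();
	return error;
}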