 drivers/base/power/runtime.c | 375 ++++++++++++++-----------------------
 1 file changed, 140 insertions(+), 235 deletions(-)
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 0c1db879544..d7b5d84c235 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -2,6 +2,7 @@
  * drivers/base/power/runtime.c - Helper functions for device run-time PM
  *
  * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
  *
  * This file is released under the GPLv2.
  */
@@ -11,8 +12,6 @@
 #include <linux/jiffies.h>
 
 static int __pm_runtime_resume(struct device *dev, int rpmflags);
-static int __pm_request_idle(struct device *dev);
-static int __pm_request_resume(struct device *dev);
 
 /**
  * update_pm_runtime_accounting - Update the time accounting of power states
@@ -79,40 +78,84 @@ static void pm_runtime_cancel_pending(struct device *dev)
 }
 
 /**
- * __pm_runtime_idle - Notify device bus type if the device can be suspended.
- * @dev: Device to notify the bus type about.
- *
- * This function must be called under dev->power.lock with interrupts disabled.
+ * rpm_check_suspend_allowed - Test whether a device may be suspended.
+ * @dev: Device to test.
  */
-static int __pm_runtime_idle(struct device *dev)
-	__releases(&dev->power.lock) __acquires(&dev->power.lock)
+static int rpm_check_suspend_allowed(struct device *dev)
 {
 	int retval = 0;
 
 	if (dev->power.runtime_error)
 		retval = -EINVAL;
-	else if (dev->power.idle_notification)
-		retval = -EINPROGRESS;
 	else if (atomic_read(&dev->power.usage_count) > 0
-	    || dev->power.disable_depth > 0
-	    || dev->power.runtime_status != RPM_ACTIVE)
+	    || dev->power.disable_depth > 0)
 		retval = -EAGAIN;
 	else if (!pm_children_suspended(dev))
 		retval = -EBUSY;
+
+	/* Pending resume requests take precedence over suspends. */
+	else if ((dev->power.deferred_resume
+			&& dev->power.runtime_status == RPM_SUSPENDING)
+	    || (dev->power.request_pending
+			&& dev->power.request == RPM_REQ_RESUME))
+		retval = -EAGAIN;
+	else if (dev->power.runtime_status == RPM_SUSPENDED)
+		retval = 1;
+
+	return retval;
+}
+
+
+/**
+ * __pm_runtime_idle - Notify device bus type if the device can be suspended.
+ * @dev: Device to notify the bus type about.
+ * @rpmflags: Flag bits.
+ *
+ * Check if the device's run-time PM status allows it to be suspended. If
+ * another idle notification has been started earlier, return immediately. If
+ * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
+ * run the ->runtime_idle() callback directly.
+ *
+ * This function must be called under dev->power.lock with interrupts disabled.
+ */
+static int __pm_runtime_idle(struct device *dev, int rpmflags)
+	__releases(&dev->power.lock) __acquires(&dev->power.lock)
+{
+	int retval;
+
+	retval = rpm_check_suspend_allowed(dev);
+	if (retval < 0)
+		;	/* Conditions are wrong. */
+
+	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
+	else if (dev->power.runtime_status != RPM_ACTIVE)
+		retval = -EAGAIN;
+
+	/*
+	 * Any pending request other than an idle notification takes
+	 * precedence over us, except that the timer may be running.
+	 */
+	else if (dev->power.request_pending &&
+	    dev->power.request > RPM_REQ_IDLE)
+		retval = -EAGAIN;
+
+	/* Act as though RPM_NOWAIT is always set. */
+	else if (dev->power.idle_notification)
+		retval = -EINPROGRESS;
 	if (retval)
 		goto out;
 
-	if (dev->power.request_pending) {
-		/*
-		 * If an idle notification request is pending, cancel it. Any
-		 * other pending request takes precedence over us.
-		 */
-		if (dev->power.request == RPM_REQ_IDLE) {
-			dev->power.request = RPM_REQ_NONE;
-		} else if (dev->power.request != RPM_REQ_NONE) {
-			retval = -EAGAIN;
-			goto out;
-		}
+	/* Pending requests need to be canceled. */
+	dev->power.request = RPM_REQ_NONE;
+
+	/* Carry out an asynchronous or a synchronous idle notification. */
+	if (rpmflags & RPM_ASYNC) {
+		dev->power.request = RPM_REQ_IDLE;
+		if (!dev->power.request_pending) {
+			dev->power.request_pending = true;
+			queue_work(pm_wq, &dev->power.work);
 		}
+		goto out;
 	}
 
 	dev->power.idle_notification = true;
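With this change the synchronous and asynchronous idle paths share one entry point, selected by the flag argument. A minimal sketch of how the two public wrappers map onto it (the example_* functions are hypothetical driver code, not part of this patch):

	#include <linux/pm_runtime.h>

	/* Process context: notify the bus type synchronously. */
	static void example_done_sync(struct device *dev)
	{
		pm_runtime_idle(dev);	/* __pm_runtime_idle(dev, 0) */
	}

	/* Atomic context: queue the notification on pm_wq instead. */
	static void example_done_atomic(struct device *dev)
	{
		pm_request_idle(dev);	/* __pm_runtime_idle(dev, RPM_ASYNC) */
	}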
@@ -154,7 +197,7 @@ int pm_runtime_idle(struct device *dev)
 	int retval;
 
 	spin_lock_irq(&dev->power.lock);
-	retval = __pm_runtime_idle(dev);
+	retval = __pm_runtime_idle(dev, 0);
 	spin_unlock_irq(&dev->power.lock);
 
 	return retval;
@@ -166,11 +209,14 @@ EXPORT_SYMBOL_GPL(pm_runtime_idle);
  * @dev: Device to suspend.
  * @rpmflags: Flag bits.
  *
- * Check if the device can be suspended and run the ->runtime_suspend() callback
- * provided by its bus type. If another suspend has been started earlier,
- * either return immediately or wait for it to finish, depending on the
- * RPM_NOWAIT flag. If an idle notification or suspend request is pending or
- * scheduled, cancel it.
+ * Check if the device's run-time PM status allows it to be suspended. If
+ * another suspend has been started earlier, either return immediately or wait
+ * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags. Cancel a
+ * pending idle notification. If the RPM_ASYNC flag is set then queue a
+ * suspend request; otherwise run the ->runtime_suspend() callback directly.
+ * If a deferred resume was requested while the callback was running then carry
+ * it out; otherwise send an idle notification for the device (if the suspend
+ * failed) or for its parent (if the suspend succeeded).
  *
  * This function must be called under dev->power.lock with interrupts disabled.
  */
@@ -179,41 +225,30 @@ static int __pm_runtime_suspend(struct device *dev, int rpmflags)
 {
 	struct device *parent = NULL;
 	bool notify = false;
-	int retval = 0;
+	int retval;
 
 	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);
 
  repeat:
-	if (dev->power.runtime_error) {
-		retval = -EINVAL;
-		goto out;
-	}
+	retval = rpm_check_suspend_allowed(dev);
 
-	/* Pending resume requests take precedence over us. */
-	if (dev->power.request_pending
-	    && dev->power.request == RPM_REQ_RESUME) {
+	if (retval < 0)
+		;	/* Conditions are wrong. */
+
+	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
+	else if (dev->power.runtime_status == RPM_RESUMING &&
+	    !(rpmflags & RPM_ASYNC))
 		retval = -EAGAIN;
+	if (retval)
 		goto out;
-	}
 
 	/* Other scheduled or pending requests need to be canceled. */
 	pm_runtime_cancel_pending(dev);
 
-	if (dev->power.runtime_status == RPM_SUSPENDED)
-		retval = 1;
-	else if (dev->power.runtime_status == RPM_RESUMING
-	    || dev->power.disable_depth > 0
-	    || atomic_read(&dev->power.usage_count) > 0)
-		retval = -EAGAIN;
-	else if (!pm_children_suspended(dev))
-		retval = -EBUSY;
-	if (retval)
-		goto out;
-
 	if (dev->power.runtime_status == RPM_SUSPENDING) {
 		DEFINE_WAIT(wait);
 
-		if (rpmflags & RPM_NOWAIT) {
+		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
 			retval = -EINPROGRESS;
 			goto out;
 		}
@@ -235,6 +270,16 @@ static int __pm_runtime_suspend(struct device *dev, int rpmflags)
 		goto repeat;
 	}
 
+	/* Carry out an asynchronous or a synchronous suspend. */
+	if (rpmflags & RPM_ASYNC) {
+		dev->power.request = RPM_REQ_SUSPEND;
+		if (!dev->power.request_pending) {
+			dev->power.request_pending = true;
+			queue_work(pm_wq, &dev->power.work);
+		}
+		goto out;
+	}
+
 	__update_runtime_status(dev, RPM_SUSPENDING);
 	dev->power.deferred_resume = false;
 
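The same dispatch now covers suspends: RPM_ASYNC turns the call into an RPM_REQ_SUSPEND work item that pm_runtime_work() later finishes with RPM_NOWAIT. A rough usage sketch (hypothetical driver helpers, not part of this patch):

	#include <linux/pm_runtime.h>

	/* Process context: suspend now; waits out a concurrent suspend. */
	static int example_suspend_sync(struct device *dev)
	{
		return pm_runtime_suspend(dev);
	}

	/* Any context: a zero delay maps straight to
	 * __pm_runtime_suspend(dev, RPM_ASYNC); a nonzero delay arms
	 * dev->power.suspend_timer first. */
	static int example_suspend_later(struct device *dev, unsigned int ms)
	{
		return pm_schedule_suspend(dev, ms);
	}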
@@ -267,6 +312,7 @@ static int __pm_runtime_suspend(struct device *dev, int rpmflags)
 
 	if (retval) {
 		__update_runtime_status(dev, RPM_ACTIVE);
+		dev->power.deferred_resume = 0;
 		if (retval == -EAGAIN || retval == -EBUSY) {
 			if (dev->power.timer_expires == 0)
 				notify = true;
@@ -292,7 +338,7 @@ static int __pm_runtime_suspend(struct device *dev, int rpmflags)
 	}
 
 	if (notify)
-		__pm_runtime_idle(dev);
+		__pm_runtime_idle(dev, 0);
 
 	if (parent && !parent->power.ignore_children) {
 		spin_unlock_irq(&dev->power.lock);
@@ -329,13 +375,15 @@ EXPORT_SYMBOL_GPL(pm_runtime_suspend);
  * @dev: Device to resume.
  * @rpmflags: Flag bits.
  *
- * Check if the device can be woken up and run the ->runtime_resume() callback
- * provided by its bus type. If another resume has been started earlier,
- * either return imediately or wait for it to finish, depending on the
- * RPM_NOWAIT flag. If there's a suspend running in parallel with this
- * function, either tell the other process to resume after suspending
- * (deferred_resume) or wait for it to finish, depending on the RPM_NOWAIT
- * flag. Cancel any scheduled or pending requests.
+ * Check if the device's run-time PM status allows it to be resumed. Cancel
+ * any scheduled or pending requests. If another resume has been started
+ * earlier, either return immediately or wait for it to finish, depending on
+ * the RPM_NOWAIT and RPM_ASYNC flags. Similarly, if there's a suspend running
+ * in parallel with this function, either tell the other process to resume
+ * after suspending (deferred_resume) or wait for it to finish. If the
+ * RPM_ASYNC flag is set then queue a resume request; otherwise run the
+ * ->runtime_resume() callback directly. Queue an idle notification for the
+ * device if the resume succeeded.
  *
  * This function must be called under dev->power.lock with interrupts disabled.
  */
@@ -348,28 +396,30 @@ static int __pm_runtime_resume(struct device *dev, int rpmflags)
 	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);
 
  repeat:
-	if (dev->power.runtime_error) {
+	if (dev->power.runtime_error)
 		retval = -EINVAL;
+	else if (dev->power.disable_depth > 0)
+		retval = -EAGAIN;
+	if (retval)
 		goto out;
-	}
 
+	/* Other scheduled or pending requests need to be canceled. */
 	pm_runtime_cancel_pending(dev);
 
-	if (dev->power.runtime_status == RPM_ACTIVE)
+	if (dev->power.runtime_status == RPM_ACTIVE) {
 		retval = 1;
-	else if (dev->power.disable_depth > 0)
-		retval = -EAGAIN;
-	if (retval)
 		goto out;
+	}
 
 	if (dev->power.runtime_status == RPM_RESUMING
 	    || dev->power.runtime_status == RPM_SUSPENDING) {
 		DEFINE_WAIT(wait);
 
-		if (rpmflags & RPM_NOWAIT) {
+		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
 			if (dev->power.runtime_status == RPM_SUSPENDING)
 				dev->power.deferred_resume = true;
-			retval = -EINPROGRESS;
+			else
+				retval = -EINPROGRESS;
 			goto out;
 		}
 
@@ -391,6 +441,17 @@ static int __pm_runtime_resume(struct device *dev, int rpmflags)
 		goto repeat;
 	}
 
+	/* Carry out an asynchronous or a synchronous resume. */
+	if (rpmflags & RPM_ASYNC) {
+		dev->power.request = RPM_REQ_RESUME;
+		if (!dev->power.request_pending) {
+			dev->power.request_pending = true;
+			queue_work(pm_wq, &dev->power.work);
+		}
+		retval = 0;
+		goto out;
+	}
+
 	if (!parent && dev->parent) {
 		/*
 		 * Increment the parent's resume counter and resume it if
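One point worth illustrating: from atomic context only the asynchronous form may be used, and if the device is mid-suspend the call queues nothing; it sets deferred_resume so the suspend path resumes the device as soon as its callback returns. A sketch under that assumption (the IRQ handler and its registration are hypothetical):

	#include <linux/interrupt.h>
	#include <linux/pm_runtime.h>

	static irqreturn_t example_wakeup_irq(int irq, void *data)
	{
		struct device *dev = data;

		/* __pm_runtime_resume(dev, RPM_ASYNC): queues RPM_REQ_RESUME,
		 * or just sets deferred_resume if a suspend is running. */
		pm_request_resume(dev);

		return IRQ_HANDLED;
	}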
@@ -460,7 +521,7 @@ static int __pm_runtime_resume(struct device *dev, int rpmflags)
 	wake_up_all(&dev->power.wait_queue);
 
 	if (!retval)
-		__pm_request_idle(dev);
+		__pm_runtime_idle(dev, RPM_ASYNC);
 
  out:
 	if (parent) {
@@ -517,7 +578,7 @@ static void pm_runtime_work(struct work_struct *work)
 	case RPM_REQ_NONE:
 		break;
 	case RPM_REQ_IDLE:
-		__pm_runtime_idle(dev);
+		__pm_runtime_idle(dev, RPM_NOWAIT);
 		break;
 	case RPM_REQ_SUSPEND:
 		__pm_runtime_suspend(dev, RPM_NOWAIT);
@@ -532,47 +593,6 @@ static void pm_runtime_work(struct work_struct *work)
 }
 
 /**
- * __pm_request_idle - Submit an idle notification request for given device.
- * @dev: Device to handle.
- *
- * Check if the device's run-time PM status is correct for suspending the device
- * and queue up a request to run __pm_runtime_idle() for it.
- *
- * This function must be called under dev->power.lock with interrupts disabled.
- */
-static int __pm_request_idle(struct device *dev)
-{
-	int retval = 0;
-
-	if (dev->power.runtime_error)
-		retval = -EINVAL;
-	else if (atomic_read(&dev->power.usage_count) > 0
-	    || dev->power.disable_depth > 0
-	    || dev->power.runtime_status == RPM_SUSPENDED
-	    || dev->power.runtime_status == RPM_SUSPENDING)
-		retval = -EAGAIN;
-	else if (!pm_children_suspended(dev))
-		retval = -EBUSY;
-	if (retval)
-		return retval;
-
-	if (dev->power.request_pending) {
-		/* Any requests other then RPM_REQ_IDLE take precedence. */
-		if (dev->power.request == RPM_REQ_NONE)
-			dev->power.request = RPM_REQ_IDLE;
-		else if (dev->power.request != RPM_REQ_IDLE)
-			retval = -EAGAIN;
-		return retval;
-	}
-
-	dev->power.request = RPM_REQ_IDLE;
-	dev->power.request_pending = true;
-	queue_work(pm_wq, &dev->power.work);
-
-	return retval;
-}
-
-/**
  * pm_request_idle - Submit an idle notification request for given device.
  * @dev: Device to handle.
  */
@@ -582,7 +602,7 @@ int pm_request_idle(struct device *dev)
 	int retval;
 
 	spin_lock_irqsave(&dev->power.lock, flags);
-	retval = __pm_request_idle(dev);
+	retval = __pm_runtime_idle(dev, RPM_ASYNC);
 	spin_unlock_irqrestore(&dev->power.lock, flags);
 
 	return retval;
@@ -590,59 +610,10 @@ int pm_request_idle(struct device *dev)
 EXPORT_SYMBOL_GPL(pm_request_idle);
 
 /**
- * __pm_request_suspend - Submit a suspend request for given device.
- * @dev: Device to suspend.
- *
- * This function must be called under dev->power.lock with interrupts disabled.
- */
-static int __pm_request_suspend(struct device *dev)
-{
-	int retval = 0;
-
-	if (dev->power.runtime_error)
-		return -EINVAL;
-
-	if (dev->power.runtime_status == RPM_SUSPENDED)
-		retval = 1;
-	else if (atomic_read(&dev->power.usage_count) > 0
-	    || dev->power.disable_depth > 0)
-		retval = -EAGAIN;
-	else if (dev->power.runtime_status == RPM_SUSPENDING)
-		retval = -EINPROGRESS;
-	else if (!pm_children_suspended(dev))
-		retval = -EBUSY;
-	if (retval < 0)
-		return retval;
-
-	pm_runtime_deactivate_timer(dev);
-
-	if (dev->power.request_pending) {
-		/*
-		 * Pending resume requests take precedence over us, but we can
-		 * overtake any other pending request.
-		 */
-		if (dev->power.request == RPM_REQ_RESUME)
-			retval = -EAGAIN;
-		else if (dev->power.request != RPM_REQ_SUSPEND)
-			dev->power.request = retval ?
-						RPM_REQ_NONE : RPM_REQ_SUSPEND;
-		return retval;
-	} else if (retval) {
-		return retval;
-	}
-
-	dev->power.request = RPM_REQ_SUSPEND;
-	dev->power.request_pending = true;
-	queue_work(pm_wq, &dev->power.work);
-
-	return 0;
-}
-
-/**
  * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
  * @data: Device pointer passed by pm_schedule_suspend().
  *
- * Check if the time is right and execute __pm_request_suspend() in that case.
+ * Check if the time is right and queue a suspend request.
  */
 static void pm_suspend_timer_fn(unsigned long data)
 {
@@ -656,7 +627,7 @@ static void pm_suspend_timer_fn(unsigned long data)
 	/* If 'expire' is after 'jiffies' we've been called too early. */
 	if (expires > 0 && !time_after(expires, jiffies)) {
 		dev->power.timer_expires = 0;
-		__pm_request_suspend(dev);
+		__pm_runtime_suspend(dev, RPM_ASYNC);
 	}
 
 	spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -670,47 +641,24 @@ static void pm_suspend_timer_fn(unsigned long data)
 int pm_schedule_suspend(struct device *dev, unsigned int delay)
 {
 	unsigned long flags;
-	int retval = 0;
+	int retval;
 
 	spin_lock_irqsave(&dev->power.lock, flags);
 
-	if (dev->power.runtime_error) {
-		retval = -EINVAL;
-		goto out;
-	}
-
 	if (!delay) {
-		retval = __pm_request_suspend(dev);
+		retval = __pm_runtime_suspend(dev, RPM_ASYNC);
 		goto out;
 	}
 
-	pm_runtime_deactivate_timer(dev);
-
-	if (dev->power.request_pending) {
-		/*
-		 * Pending resume requests take precedence over us, but any
-		 * other pending requests have to be canceled.
-		 */
-		if (dev->power.request == RPM_REQ_RESUME) {
-			retval = -EAGAIN;
-			goto out;
-		}
-		dev->power.request = RPM_REQ_NONE;
-	}
-
-	if (dev->power.runtime_status == RPM_SUSPENDED)
-		retval = 1;
-	else if (atomic_read(&dev->power.usage_count) > 0
-	    || dev->power.disable_depth > 0)
-		retval = -EAGAIN;
-	else if (!pm_children_suspended(dev))
-		retval = -EBUSY;
+	retval = rpm_check_suspend_allowed(dev);
 	if (retval)
 		goto out;
 
+	/* Other scheduled or pending requests need to be canceled. */
+	pm_runtime_cancel_pending(dev);
+
 	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
-	if (!dev->power.timer_expires)
-		dev->power.timer_expires = 1;
+	dev->power.timer_expires += !dev->power.timer_expires;
 	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);
 
  out:
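The one-liner replacing the old zero check deserves a note: timer_expires == 0 means "no timer armed", so when jiffies + msecs_to_jiffies(delay) wraps to exactly 0 the value must be nudged to 1. The new form is branch-free but equivalent:

	/* Before: */
	if (!dev->power.timer_expires)
		dev->power.timer_expires = 1;

	/* After: !x is 1 only when x == 0, so this adds 1 exactly in the
	 * wrap-around case and is a no-op otherwise. */
	dev->power.timer_expires += !dev->power.timer_expires;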
@@ -723,49 +671,6 @@ EXPORT_SYMBOL_GPL(pm_schedule_suspend);
 /**
  * pm_request_resume - Submit a resume request for given device.
  * @dev: Device to resume.
- *
- * This function must be called under dev->power.lock with interrupts disabled.
- */
-static int __pm_request_resume(struct device *dev)
-{
-	int retval = 0;
-
-	if (dev->power.runtime_error)
-		return -EINVAL;
-
-	if (dev->power.runtime_status == RPM_ACTIVE)
-		retval = 1;
-	else if (dev->power.runtime_status == RPM_RESUMING)
-		retval = -EINPROGRESS;
-	else if (dev->power.disable_depth > 0)
-		retval = -EAGAIN;
-	if (retval < 0)
-		return retval;
-
-	pm_runtime_deactivate_timer(dev);
-
-	if (dev->power.runtime_status == RPM_SUSPENDING) {
-		dev->power.deferred_resume = true;
-		return retval;
-	}
-	if (dev->power.request_pending) {
-		/* If non-resume request is pending, we can overtake it. */
-		dev->power.request = retval ? RPM_REQ_NONE : RPM_REQ_RESUME;
-		return retval;
-	}
-	if (retval)
-		return retval;
-
-	dev->power.request = RPM_REQ_RESUME;
-	dev->power.request_pending = true;
-	queue_work(pm_wq, &dev->power.work);
-
-	return retval;
-}
-
-/**
- * pm_request_resume - Submit a resume request for given device.
- * @dev: Device to resume.
  */
 int pm_request_resume(struct device *dev)
 {
@@ -773,7 +678,7 @@ int pm_request_resume(struct device *dev)
 	int retval;
 
 	spin_lock_irqsave(&dev->power.lock, flags);
-	retval = __pm_request_resume(dev);
+	retval = __pm_runtime_resume(dev, RPM_ASYNC);
 	spin_unlock_irqrestore(&dev->power.lock, flags);
 
 	return retval;
@@ -1088,7 +993,7 @@ void pm_runtime_allow(struct device *dev)
 
 	dev->power.runtime_auto = true;
 	if (atomic_dec_and_test(&dev->power.usage_count))
-		__pm_runtime_idle(dev, 0);
+		__pm_runtime_idle(dev, 0);
 
  out:
 	spin_unlock_irq(&dev->power.lock);
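Taken together, the usual usage-count pattern lands in these merged entry points as well: the final put triggers an idle notification, much as pm_runtime_allow() above does when it drops the count. A minimal sketch, assuming a hypothetical example_xfer() transfer routine (error handling simplified):

	#include <linux/pm_runtime.h>

	static int example_xfer(struct device *dev)
	{
		int ret;

		ret = pm_runtime_get_sync(dev);	/* resume, synchronously */
		if (ret < 0) {
			pm_runtime_put_noidle(dev);	/* drop the count taken above */
			return ret;
		}

		/* ... talk to the hardware ... */

		pm_runtime_put(dev);	/* on the last put, an idle notification
					 * goes through __pm_runtime_idle() */
		return 0;
	}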