about summary refs log tree commit diff stats
path: root/drivers/base
diff options
context:
space:
mode:
author Rafael J. Wysocki <rjw@sisk.pl> 2011-10-07 17:16:55 -0400
committer Rafael J. Wysocki <rjw@sisk.pl> 2011-10-07 17:16:55 -0400
commitd727b60659a1173eb4142a5fc521ce67c28b34e1 (patch)
tree793d0fc86ecf98331024db5c86bc271cc72aa2d2 /drivers/base
parent3ee72ca99288f1de95ec9c570e43f531c8799f06 (diff)
parent2a5306cc5f383b0e7414c75e458111afd4a563a4 (diff)
Merge branch 'pm-runtime' into pm-for-linus
* pm-runtime:
  PM / Tracing: build rpm-traces.c only if CONFIG_PM_RUNTIME is set
  PM / Runtime: Replace dev_dbg() with trace_rpm_*()
  PM / Runtime: Introduce trace points for tracing rpm_* functions
  PM / Runtime: Don't run callbacks under lock for power.irq_safe set
  USB: Add wakeup info to debugging messages
  PM / Runtime: pm_runtime_idle() can be called in atomic context
  PM / Runtime: Add macro to test for runtime PM events
  PM / Runtime: Add might_sleep() to runtime PM functions
Diffstat (limited to 'drivers/base')
-rw-r--r--drivers/base/power/runtime.c94
1 file changed, 65 insertions, 29 deletions
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index acb3f83b8079..7a6fb5e34a0e 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -9,6 +9,7 @@
9 9
10#include <linux/sched.h> 10#include <linux/sched.h>
11#include <linux/pm_runtime.h> 11#include <linux/pm_runtime.h>
12#include <trace/events/rpm.h>
12#include "power.h" 13#include "power.h"
13 14
14static int rpm_resume(struct device *dev, int rpmflags); 15static int rpm_resume(struct device *dev, int rpmflags);
@@ -155,6 +156,31 @@ static int rpm_check_suspend_allowed(struct device *dev)
155} 156}
156 157
157/** 158/**
159 * __rpm_callback - Run a given runtime PM callback for a given device.
160 * @cb: Runtime PM callback to run.
161 * @dev: Device to run the callback for.
162 */
163static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
164 __releases(&dev->power.lock) __acquires(&dev->power.lock)
165{
166 int retval;
167
168 if (dev->power.irq_safe)
169 spin_unlock(&dev->power.lock);
170 else
171 spin_unlock_irq(&dev->power.lock);
172
173 retval = cb(dev);
174
175 if (dev->power.irq_safe)
176 spin_lock(&dev->power.lock);
177 else
178 spin_lock_irq(&dev->power.lock);
179
180 return retval;
181}
182
183/**
158 * rpm_idle - Notify device bus type if the device can be suspended. 184 * rpm_idle - Notify device bus type if the device can be suspended.
159 * @dev: Device to notify the bus type about. 185 * @dev: Device to notify the bus type about.
160 * @rpmflags: Flag bits. 186 * @rpmflags: Flag bits.
@@ -171,6 +197,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
171 int (*callback)(struct device *); 197 int (*callback)(struct device *);
172 int retval; 198 int retval;
173 199
200 trace_rpm_idle(dev, rpmflags);
174 retval = rpm_check_suspend_allowed(dev); 201 retval = rpm_check_suspend_allowed(dev);
175 if (retval < 0) 202 if (retval < 0)
176 ; /* Conditions are wrong. */ 203 ; /* Conditions are wrong. */
@@ -225,24 +252,14 @@ static int rpm_idle(struct device *dev, int rpmflags)
225 else 252 else
226 callback = NULL; 253 callback = NULL;
227 254
228 if (callback) { 255 if (callback)
229 if (dev->power.irq_safe) 256 __rpm_callback(callback, dev);
230 spin_unlock(&dev->power.lock);
231 else
232 spin_unlock_irq(&dev->power.lock);
233
234 callback(dev);
235
236 if (dev->power.irq_safe)
237 spin_lock(&dev->power.lock);
238 else
239 spin_lock_irq(&dev->power.lock);
240 }
241 257
242 dev->power.idle_notification = false; 258 dev->power.idle_notification = false;
243 wake_up_all(&dev->power.wait_queue); 259 wake_up_all(&dev->power.wait_queue);
244 260
245 out: 261 out:
262 trace_rpm_return_int(dev, _THIS_IP_, retval);
246 return retval; 263 return retval;
247} 264}
248 265
@@ -252,22 +269,14 @@ static int rpm_idle(struct device *dev, int rpmflags)
252 * @dev: Device to run the callback for. 269 * @dev: Device to run the callback for.
253 */ 270 */
254static int rpm_callback(int (*cb)(struct device *), struct device *dev) 271static int rpm_callback(int (*cb)(struct device *), struct device *dev)
255 __releases(&dev->power.lock) __acquires(&dev->power.lock)
256{ 272{
257 int retval; 273 int retval;
258 274
259 if (!cb) 275 if (!cb)
260 return -ENOSYS; 276 return -ENOSYS;
261 277
262 if (dev->power.irq_safe) { 278 retval = __rpm_callback(cb, dev);
263 retval = cb(dev);
264 } else {
265 spin_unlock_irq(&dev->power.lock);
266
267 retval = cb(dev);
268 279
269 spin_lock_irq(&dev->power.lock);
270 }
271 dev->power.runtime_error = retval; 280 dev->power.runtime_error = retval;
272 return retval != -EACCES ? retval : -EIO; 281 return retval != -EACCES ? retval : -EIO;
273} 282}
@@ -295,7 +304,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
295 struct device *parent = NULL; 304 struct device *parent = NULL;
296 int retval; 305 int retval;
297 306
298 dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags); 307 trace_rpm_suspend(dev, rpmflags);
299 308
300 repeat: 309 repeat:
301 retval = rpm_check_suspend_allowed(dev); 310 retval = rpm_check_suspend_allowed(dev);
@@ -347,6 +356,15 @@ static int rpm_suspend(struct device *dev, int rpmflags)
347 goto out; 356 goto out;
348 } 357 }
349 358
359 if (dev->power.irq_safe) {
360 spin_unlock(&dev->power.lock);
361
362 cpu_relax();
363
364 spin_lock(&dev->power.lock);
365 goto repeat;
366 }
367
350 /* Wait for the other suspend running in parallel with us. */ 368 /* Wait for the other suspend running in parallel with us. */
351 for (;;) { 369 for (;;) {
352 prepare_to_wait(&dev->power.wait_queue, &wait, 370 prepare_to_wait(&dev->power.wait_queue, &wait,
@@ -430,7 +448,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
430 } 448 }
431 449
432 out: 450 out:
433 dev_dbg(dev, "%s returns %d\n", __func__, retval); 451 trace_rpm_return_int(dev, _THIS_IP_, retval);
434 452
435 return retval; 453 return retval;
436} 454}
@@ -459,7 +477,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
459 struct device *parent = NULL; 477 struct device *parent = NULL;
460 int retval = 0; 478 int retval = 0;
461 479
462 dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags); 480 trace_rpm_resume(dev, rpmflags);
463 481
464 repeat: 482 repeat:
465 if (dev->power.runtime_error) 483 if (dev->power.runtime_error)
@@ -496,6 +514,15 @@ static int rpm_resume(struct device *dev, int rpmflags)
496 goto out; 514 goto out;
497 } 515 }
498 516
517 if (dev->power.irq_safe) {
518 spin_unlock(&dev->power.lock);
519
520 cpu_relax();
521
522 spin_lock(&dev->power.lock);
523 goto repeat;
524 }
525
499 /* Wait for the operation carried out in parallel with us. */ 526 /* Wait for the operation carried out in parallel with us. */
500 for (;;) { 527 for (;;) {
501 prepare_to_wait(&dev->power.wait_queue, &wait, 528 prepare_to_wait(&dev->power.wait_queue, &wait,
@@ -615,7 +642,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
615 spin_lock_irq(&dev->power.lock); 642 spin_lock_irq(&dev->power.lock);
616 } 643 }
617 644
618 dev_dbg(dev, "%s returns %d\n", __func__, retval); 645 trace_rpm_return_int(dev, _THIS_IP_, retval);
619 646
620 return retval; 647 return retval;
621} 648}
@@ -732,13 +759,16 @@ EXPORT_SYMBOL_GPL(pm_schedule_suspend);
732 * return immediately if it is larger than zero. Then carry out an idle 759 * return immediately if it is larger than zero. Then carry out an idle
733 * notification, either synchronous or asynchronous. 760 * notification, either synchronous or asynchronous.
734 * 761 *
735 * This routine may be called in atomic context if the RPM_ASYNC flag is set. 762 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
763 * or if pm_runtime_irq_safe() has been called.
736 */ 764 */
737int __pm_runtime_idle(struct device *dev, int rpmflags) 765int __pm_runtime_idle(struct device *dev, int rpmflags)
738{ 766{
739 unsigned long flags; 767 unsigned long flags;
740 int retval; 768 int retval;
741 769
770 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
771
742 if (rpmflags & RPM_GET_PUT) { 772 if (rpmflags & RPM_GET_PUT) {
743 if (!atomic_dec_and_test(&dev->power.usage_count)) 773 if (!atomic_dec_and_test(&dev->power.usage_count))
744 return 0; 774 return 0;
@@ -761,13 +791,16 @@ EXPORT_SYMBOL_GPL(__pm_runtime_idle);
761 * return immediately if it is larger than zero. Then carry out a suspend, 791 * return immediately if it is larger than zero. Then carry out a suspend,
762 * either synchronous or asynchronous. 792 * either synchronous or asynchronous.
763 * 793 *
764 * This routine may be called in atomic context if the RPM_ASYNC flag is set. 794 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
795 * or if pm_runtime_irq_safe() has been called.
765 */ 796 */
766int __pm_runtime_suspend(struct device *dev, int rpmflags) 797int __pm_runtime_suspend(struct device *dev, int rpmflags)
767{ 798{
768 unsigned long flags; 799 unsigned long flags;
769 int retval; 800 int retval;
770 801
802 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
803
771 if (rpmflags & RPM_GET_PUT) { 804 if (rpmflags & RPM_GET_PUT) {
772 if (!atomic_dec_and_test(&dev->power.usage_count)) 805 if (!atomic_dec_and_test(&dev->power.usage_count))
773 return 0; 806 return 0;
@@ -789,13 +822,16 @@ EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
789 * If the RPM_GET_PUT flag is set, increment the device's usage count. Then 822 * If the RPM_GET_PUT flag is set, increment the device's usage count. Then
790 * carry out a resume, either synchronous or asynchronous. 823 * carry out a resume, either synchronous or asynchronous.
791 * 824 *
792 * This routine may be called in atomic context if the RPM_ASYNC flag is set. 825 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
826 * or if pm_runtime_irq_safe() has been called.
793 */ 827 */
794int __pm_runtime_resume(struct device *dev, int rpmflags) 828int __pm_runtime_resume(struct device *dev, int rpmflags)
795{ 829{
796 unsigned long flags; 830 unsigned long flags;
797 int retval; 831 int retval;
798 832
833 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
834
799 if (rpmflags & RPM_GET_PUT) 835 if (rpmflags & RPM_GET_PUT)
800 atomic_inc(&dev->power.usage_count); 836 atomic_inc(&dev->power.usage_count);
801 837