author		Rafael J. Wysocki <rjw@sisk.pl>	2011-04-28 18:35:50 -0400
committer	Rafael J. Wysocki <rjw@sisk.pl>	2011-04-28 18:35:50 -0400
commit		4d27e9dcff00a6425d779b065ec8892e4f391661
tree		6602f1dcfad3e3065a1c081cf89f2d020976600d /drivers/base
parent		fafc9929c668f8bae6dd1f109f33a86d2cb3c460
PM: Make power domain callbacks take precedence over subsystem ones
Change the PM core's behavior related to power domains in such a way that, if a power domain is defined for a given device, its callbacks will be executed instead of, and not in addition to, the device subsystem's PM callbacks.

The idea behind the initial implementation of power domain handling in the PM core was that power domain callbacks would be executed in addition to subsystem callbacks, so that it would be possible to extend the subsystem callbacks by using power domains. It turns out, however, that this wouldn't really be convenient in some important situations.

For example, there are systems in which power can only be removed from entire power domains. On those systems it is not desirable to execute device drivers' PM callbacks until it is known that power is going to be removed from the devices in question, which means that they should be executed by power domain callbacks rather than by subsystem (e.g. bus type) PM callbacks, because subsystems generally have no information about which devices belong to which power domain. Thus, for instance, if the bus type in question is the platform bus type, its PM callbacks generally should not be called in addition to power domain callbacks, because they run device drivers' callbacks unconditionally if defined.

While in principle the default subsystem PM callbacks, or a subset of them, may be replaced with different functions, it doesn't seem correct to do so, because that would change the subsystem's behavior with respect to all devices in the system, regardless of whether or not they belong to any power domains. Thus, the only remaining option is to make power domain callbacks take precedence over subsystem callbacks.

Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Acked-by: Grant Likely <grant.likely@secretlab.ca>
Acked-by: Kevin Hilman <khilman@ti.com>
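The change boils down to a precedence rule in the PM core's callback dispatch. Below is a minimal, self-contained C sketch of that rule, not the kernel's actual implementation: the structures and the single suspend callback are simplified stand-ins for illustration (the real struct device, dev_pm_ops and dev_power_domain are far richer, and the kernel also dispatches through type, class and legacy callbacks).

#include <stdio.h>

/*
 * Simplified stand-ins for the kernel structures involved; names and
 * layouts are illustrative only.
 */
struct dev_pm_ops {
	int (*suspend)(void *dev);
};

struct dev_power_domain {
	struct dev_pm_ops ops;
};

struct device {
	const char *name;
	struct dev_power_domain *pwr_domain;
	const struct dev_pm_ops *bus_pm;	/* stand-in for dev->bus->pm */
};

/*
 * After this change: if a power domain is defined for the device, its
 * callback is used *instead of* the subsystem callback, not in addition
 * to it; otherwise the subsystem callback runs as before.
 */
static int device_suspend(struct device *dev)
{
	if (dev->pwr_domain) {
		if (dev->pwr_domain->ops.suspend)
			return dev->pwr_domain->ops.suspend(dev);
		return 0;
	}
	if (dev->bus_pm && dev->bus_pm->suspend)
		return dev->bus_pm->suspend(dev);
	return 0;
}

static int domain_suspend(void *dev)
{
	printf("%s: domain suspend\n", ((struct device *)dev)->name);
	return 0;
}

static int bus_suspend(void *dev)
{
	printf("%s: bus suspend\n", ((struct device *)dev)->name);
	return 0;
}

int main(void)
{
	static const struct dev_pm_ops bus_pm = { .suspend = bus_suspend };
	struct dev_power_domain pd = { .ops = { .suspend = domain_suspend } };

	struct device in_domain = { "dev0", &pd, &bus_pm };
	struct device no_domain = { "dev1", NULL, &bus_pm };

	device_suspend(&in_domain);	/* only the domain callback runs */
	device_suspend(&no_domain);	/* falls back to the bus callback */
	return 0;
}

With the earlier behavior both callbacks would have run for dev0; after this change only the power domain callback runs and the bus (subsystem) callback is skipped.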
Diffstat (limited to 'drivers/base')
-rw-r--r--	drivers/base/power/main.c	64
-rw-r--r--	drivers/base/power/runtime.c	29
2 files changed, 41 insertions(+), 52 deletions(-)
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index abe3ab709e87..3b354560f306 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -426,10 +426,8 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
 
 	if (dev->pwr_domain) {
 		pm_dev_dbg(dev, state, "EARLY power domain ");
-		pm_noirq_op(dev, &dev->pwr_domain->ops, state);
-	}
-
-	if (dev->type && dev->type->pm) {
+		error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
+	} else if (dev->type && dev->type->pm) {
 		pm_dev_dbg(dev, state, "EARLY type ");
 		error = pm_noirq_op(dev, dev->type->pm, state);
 	} else if (dev->class && dev->class->pm) {
@@ -517,7 +515,8 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
 
 	if (dev->pwr_domain) {
 		pm_dev_dbg(dev, state, "power domain ");
-		pm_op(dev, &dev->pwr_domain->ops, state);
+		error = pm_op(dev, &dev->pwr_domain->ops, state);
+		goto End;
 	}
 
 	if (dev->type && dev->type->pm) {
@@ -629,12 +628,11 @@ static void device_complete(struct device *dev, pm_message_t state)
 {
 	device_lock(dev);
 
-	if (dev->pwr_domain && dev->pwr_domain->ops.complete) {
+	if (dev->pwr_domain) {
 		pm_dev_dbg(dev, state, "completing power domain ");
-		dev->pwr_domain->ops.complete(dev);
-	}
-
-	if (dev->type && dev->type->pm) {
+		if (dev->pwr_domain->ops.complete)
+			dev->pwr_domain->ops.complete(dev);
+	} else if (dev->type && dev->type->pm) {
 		pm_dev_dbg(dev, state, "completing type ");
 		if (dev->type->pm->complete)
 			dev->type->pm->complete(dev);
@@ -732,7 +730,12 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
 {
 	int error;
 
-	if (dev->type && dev->type->pm) {
+	if (dev->pwr_domain) {
+		pm_dev_dbg(dev, state, "LATE power domain ");
+		error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
+		if (error)
+			return error;
+	} else if (dev->type && dev->type->pm) {
 		pm_dev_dbg(dev, state, "LATE type ");
 		error = pm_noirq_op(dev, dev->type->pm, state);
 		if (error)
@@ -749,11 +752,6 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
 		return error;
 	}
 
-	if (dev->pwr_domain) {
-		pm_dev_dbg(dev, state, "LATE power domain ");
-		pm_noirq_op(dev, &dev->pwr_domain->ops, state);
-	}
-
 	return 0;
 }
 
@@ -841,21 +839,27 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 		goto End;
 	}
 
+	if (dev->pwr_domain) {
+		pm_dev_dbg(dev, state, "power domain ");
+		error = pm_op(dev, &dev->pwr_domain->ops, state);
+		goto End;
+	}
+
 	if (dev->type && dev->type->pm) {
 		pm_dev_dbg(dev, state, "type ");
 		error = pm_op(dev, dev->type->pm, state);
-		goto Domain;
+		goto End;
 	}
 
 	if (dev->class) {
 		if (dev->class->pm) {
 			pm_dev_dbg(dev, state, "class ");
 			error = pm_op(dev, dev->class->pm, state);
-			goto Domain;
+			goto End;
 		} else if (dev->class->suspend) {
 			pm_dev_dbg(dev, state, "legacy class ");
 			error = legacy_suspend(dev, state, dev->class->suspend);
-			goto Domain;
+			goto End;
 		}
 	}
 
@@ -869,12 +873,6 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 		}
 	}
 
- Domain:
-	if (!error && dev->pwr_domain) {
-		pm_dev_dbg(dev, state, "power domain ");
-		pm_op(dev, &dev->pwr_domain->ops, state);
-	}
-
  End:
 	device_unlock(dev);
 	complete_all(&dev->power.completion);
@@ -965,7 +963,14 @@ static int device_prepare(struct device *dev, pm_message_t state)
 
 	device_lock(dev);
 
-	if (dev->type && dev->type->pm) {
+	if (dev->pwr_domain) {
+		pm_dev_dbg(dev, state, "preparing power domain ");
+		if (dev->pwr_domain->ops.prepare)
+			error = dev->pwr_domain->ops.prepare(dev);
+		suspend_report_result(dev->pwr_domain->ops.prepare, error);
+		if (error)
+			goto End;
+	} else if (dev->type && dev->type->pm) {
 		pm_dev_dbg(dev, state, "preparing type ");
 		if (dev->type->pm->prepare)
 			error = dev->type->pm->prepare(dev);
@@ -984,13 +989,6 @@ static int device_prepare(struct device *dev, pm_message_t state)
 		if (dev->bus->pm->prepare)
 			error = dev->bus->pm->prepare(dev);
 		suspend_report_result(dev->bus->pm->prepare, error);
-		if (error)
-			goto End;
-	}
-
-	if (dev->pwr_domain && dev->pwr_domain->ops.prepare) {
-		pm_dev_dbg(dev, state, "preparing power domain ");
-		dev->pwr_domain->ops.prepare(dev);
 	}
 
  End:
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 3172c60d23a9..0d4587b15c55 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -168,7 +168,6 @@ static int rpm_check_suspend_allowed(struct device *dev)
 static int rpm_idle(struct device *dev, int rpmflags)
 {
 	int (*callback)(struct device *);
-	int (*domain_callback)(struct device *);
 	int retval;
 
 	retval = rpm_check_suspend_allowed(dev);
@@ -214,7 +213,9 @@ static int rpm_idle(struct device *dev, int rpmflags)
 
 	dev->power.idle_notification = true;
 
-	if (dev->type && dev->type->pm)
+	if (dev->pwr_domain)
+		callback = dev->pwr_domain->ops.runtime_idle;
+	else if (dev->type && dev->type->pm)
 		callback = dev->type->pm->runtime_idle;
 	else if (dev->class && dev->class->pm)
 		callback = dev->class->pm->runtime_idle;
@@ -223,19 +224,10 @@ static int rpm_idle(struct device *dev, int rpmflags)
 	else
 		callback = NULL;
 
-	if (dev->pwr_domain)
-		domain_callback = dev->pwr_domain->ops.runtime_idle;
-	else
-		domain_callback = NULL;
-
-	if (callback || domain_callback) {
+	if (callback) {
 		spin_unlock_irq(&dev->power.lock);
 
-		if (domain_callback)
-			retval = domain_callback(dev);
-
-		if (!retval && callback)
-			callback(dev);
+		callback(dev);
 
 		spin_lock_irq(&dev->power.lock);
 	}
@@ -382,7 +374,9 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 
 	__update_runtime_status(dev, RPM_SUSPENDING);
 
-	if (dev->type && dev->type->pm)
+	if (dev->pwr_domain)
+		callback = dev->pwr_domain->ops.runtime_suspend;
+	else if (dev->type && dev->type->pm)
 		callback = dev->type->pm->runtime_suspend;
 	else if (dev->class && dev->class->pm)
 		callback = dev->class->pm->runtime_suspend;
@@ -400,8 +394,6 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 		else
 			pm_runtime_cancel_pending(dev);
 	} else {
-		if (dev->pwr_domain)
-			rpm_callback(dev->pwr_domain->ops.runtime_suspend, dev);
  no_callback:
 		__update_runtime_status(dev, RPM_SUSPENDED);
 		pm_runtime_deactivate_timer(dev);
@@ -582,9 +574,8 @@ static int rpm_resume(struct device *dev, int rpmflags)
 	__update_runtime_status(dev, RPM_RESUMING);
 
 	if (dev->pwr_domain)
-		rpm_callback(dev->pwr_domain->ops.runtime_resume, dev);
-
-	if (dev->type && dev->type->pm)
+		callback = dev->pwr_domain->ops.runtime_resume;
+	else if (dev->type && dev->type->pm)
 		callback = dev->type->pm->runtime_resume;
 	else if (dev->class && dev->class->pm)
 		callback = dev->class->pm->runtime_resume;