Diffstat (limited to 'drivers/base/power/main.c')
-rw-r--r--   drivers/base/power/main.c   83
1 file changed, 44 insertions(+), 39 deletions(-)
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index fbc5b6e7c59..aa632020774 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -63,6 +63,7 @@ void device_pm_init(struct device *dev)
 	dev->power.wakeup = NULL;
 	spin_lock_init(&dev->power.lock);
 	pm_runtime_init(dev);
+	INIT_LIST_HEAD(&dev->power.entry);
 }
 
 /**
@@ -425,10 +426,8 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
 
 	if (dev->pwr_domain) {
 		pm_dev_dbg(dev, state, "EARLY power domain ");
-		pm_noirq_op(dev, &dev->pwr_domain->ops, state);
-	}
-
-	if (dev->type && dev->type->pm) {
+		error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
+	} else if (dev->type && dev->type->pm) {
 		pm_dev_dbg(dev, state, "EARLY type ");
 		error = pm_noirq_op(dev, dev->type->pm, state);
 	} else if (dev->class && dev->class->pm) {
@@ -516,7 +515,8 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
 
 	if (dev->pwr_domain) {
 		pm_dev_dbg(dev, state, "power domain ");
-		pm_op(dev, &dev->pwr_domain->ops, state);
+		error = pm_op(dev, &dev->pwr_domain->ops, state);
+		goto End;
 	}
 
 	if (dev->type && dev->type->pm) {
@@ -579,11 +579,13 @@ static bool is_async(struct device *dev)
  * Execute the appropriate "resume" callback for all devices whose status
  * indicates that they are suspended.
  */
-static void dpm_resume(pm_message_t state)
+void dpm_resume(pm_message_t state)
 {
 	struct device *dev;
 	ktime_t starttime = ktime_get();
 
+	might_sleep();
+
 	mutex_lock(&dpm_list_mtx);
 	pm_transition = state;
 	async_error = 0;
@@ -628,12 +630,11 @@ static void device_complete(struct device *dev, pm_message_t state)
 {
 	device_lock(dev);
 
-	if (dev->pwr_domain && dev->pwr_domain->ops.complete) {
+	if (dev->pwr_domain) {
 		pm_dev_dbg(dev, state, "completing power domain ");
-		dev->pwr_domain->ops.complete(dev);
-	}
-
-	if (dev->type && dev->type->pm) {
+		if (dev->pwr_domain->ops.complete)
+			dev->pwr_domain->ops.complete(dev);
+	} else if (dev->type && dev->type->pm) {
 		pm_dev_dbg(dev, state, "completing type ");
 		if (dev->type->pm->complete)
 			dev->type->pm->complete(dev);
@@ -657,10 +658,12 @@ static void device_complete(struct device *dev, pm_message_t state)
  * Execute the ->complete() callbacks for all devices whose PM status is not
  * DPM_ON (this allows new devices to be registered).
  */
-static void dpm_complete(pm_message_t state)
+void dpm_complete(pm_message_t state)
 {
 	struct list_head list;
 
+	might_sleep();
+
 	INIT_LIST_HEAD(&list);
 	mutex_lock(&dpm_list_mtx);
 	while (!list_empty(&dpm_prepared_list)) {
@@ -689,7 +692,6 @@ static void dpm_complete(pm_message_t state)
  */
 void dpm_resume_end(pm_message_t state)
 {
-	might_sleep();
 	dpm_resume(state);
 	dpm_complete(state);
 }
@@ -731,7 +733,12 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
 {
 	int error;
 
-	if (dev->type && dev->type->pm) {
+	if (dev->pwr_domain) {
+		pm_dev_dbg(dev, state, "LATE power domain ");
+		error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
+		if (error)
+			return error;
+	} else if (dev->type && dev->type->pm) {
 		pm_dev_dbg(dev, state, "LATE type ");
 		error = pm_noirq_op(dev, dev->type->pm, state);
 		if (error)
@@ -748,11 +755,6 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
 		return error;
 	}
 
-	if (dev->pwr_domain) {
-		pm_dev_dbg(dev, state, "LATE power domain ");
-		pm_noirq_op(dev, &dev->pwr_domain->ops, state);
-	}
-
 	return 0;
 }
 
@@ -840,21 +842,27 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 		goto End;
 	}
 
+	if (dev->pwr_domain) {
+		pm_dev_dbg(dev, state, "power domain ");
+		error = pm_op(dev, &dev->pwr_domain->ops, state);
+		goto End;
+	}
+
 	if (dev->type && dev->type->pm) {
 		pm_dev_dbg(dev, state, "type ");
 		error = pm_op(dev, dev->type->pm, state);
-		goto Domain;
+		goto End;
 	}
 
 	if (dev->class) {
 		if (dev->class->pm) {
 			pm_dev_dbg(dev, state, "class ");
 			error = pm_op(dev, dev->class->pm, state);
-			goto Domain;
+			goto End;
 		} else if (dev->class->suspend) {
 			pm_dev_dbg(dev, state, "legacy class ");
 			error = legacy_suspend(dev, state, dev->class->suspend);
-			goto Domain;
+			goto End;
 		}
 	}
 
@@ -868,12 +876,6 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 		}
 	}
 
- Domain:
-	if (!error && dev->pwr_domain) {
-		pm_dev_dbg(dev, state, "power domain ");
-		pm_op(dev, &dev->pwr_domain->ops, state);
-	}
-
  End:
 	device_unlock(dev);
 	complete_all(&dev->power.completion);
@@ -913,11 +915,13 @@ static int device_suspend(struct device *dev)
  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
  * @state: PM transition of the system being carried out.
  */
-static int dpm_suspend(pm_message_t state)
+int dpm_suspend(pm_message_t state)
 {
 	ktime_t starttime = ktime_get();
 	int error = 0;
 
+	might_sleep();
+
 	mutex_lock(&dpm_list_mtx);
 	pm_transition = state;
 	async_error = 0;
@@ -964,7 +968,14 @@ static int device_prepare(struct device *dev, pm_message_t state)
 
 	device_lock(dev);
 
-	if (dev->type && dev->type->pm) {
+	if (dev->pwr_domain) {
+		pm_dev_dbg(dev, state, "preparing power domain ");
+		if (dev->pwr_domain->ops.prepare)
+			error = dev->pwr_domain->ops.prepare(dev);
+		suspend_report_result(dev->pwr_domain->ops.prepare, error);
+		if (error)
+			goto End;
+	} else if (dev->type && dev->type->pm) {
 		pm_dev_dbg(dev, state, "preparing type ");
 		if (dev->type->pm->prepare)
 			error = dev->type->pm->prepare(dev);
@@ -983,13 +994,6 @@ static int device_prepare(struct device *dev, pm_message_t state)
 		if (dev->bus->pm->prepare)
 			error = dev->bus->pm->prepare(dev);
 		suspend_report_result(dev->bus->pm->prepare, error);
-		if (error)
-			goto End;
-	}
-
-	if (dev->pwr_domain && dev->pwr_domain->ops.prepare) {
-		pm_dev_dbg(dev, state, "preparing power domain ");
-		dev->pwr_domain->ops.prepare(dev);
 	}
 
  End:
@@ -1004,10 +1008,12 @@ static int device_prepare(struct device *dev, pm_message_t state)
  *
  * Execute the ->prepare() callback(s) for all devices.
  */
-static int dpm_prepare(pm_message_t state)
+int dpm_prepare(pm_message_t state)
 {
 	int error = 0;
 
+	might_sleep();
+
 	mutex_lock(&dpm_list_mtx);
 	while (!list_empty(&dpm_list)) {
 		struct device *dev = to_device(dpm_list.next);
@@ -1056,7 +1062,6 @@ int dpm_suspend_start(pm_message_t state)
 {
 	int error;
 
-	might_sleep();
 	error = dpm_prepare(state);
 	if (!error)
 		error = dpm_suspend(state);