Diffstat (limited to 'drivers/base/power')
 drivers/base/power/main.c    | 143
 drivers/base/power/power.h   |   6
 drivers/base/power/runtime.c |  45
 drivers/base/power/sysfs.c   | 100
 4 files changed, 280 insertions(+), 14 deletions(-)
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index a5142bddef41..0e26a6f6fd48 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -25,6 +25,7 @@
 #include <linux/resume-trace.h>
 #include <linux/interrupt.h>
 #include <linux/sched.h>
+#include <linux/async.h>
 
 #include "../base.h"
 #include "power.h"
@@ -42,6 +43,7 @@
 LIST_HEAD(dpm_list);
 
 static DEFINE_MUTEX(dpm_list_mtx);
+static pm_message_t pm_transition;
 
 /*
  * Set once the preparation of devices for a PM transition has started, reset
@@ -56,6 +58,7 @@ static bool transition_started;
 void device_pm_init(struct device *dev)
 {
 	dev->power.status = DPM_ON;
+	init_completion(&dev->power.completion);
 	pm_runtime_init(dev);
 }
 
@@ -111,6 +114,7 @@ void device_pm_remove(struct device *dev)
 	pr_debug("PM: Removing info for %s:%s\n",
 		 dev->bus ? dev->bus->name : "No Bus",
 		 kobject_name(&dev->kobj));
+	complete_all(&dev->power.completion);
 	mutex_lock(&dpm_list_mtx);
 	list_del_init(&dev->power.entry);
 	mutex_unlock(&dpm_list_mtx);
@@ -188,6 +192,31 @@ static void initcall_debug_report(struct device *dev, ktime_t calltime,
 }
 
 /**
+ * dpm_wait - Wait for a PM operation to complete.
+ * @dev: Device to wait for.
+ * @async: If unset, wait only if the device's power.async_suspend flag is set.
+ */
+static void dpm_wait(struct device *dev, bool async)
+{
+	if (!dev)
+		return;
+
+	if (async || (pm_async_enabled && dev->power.async_suspend))
+		wait_for_completion(&dev->power.completion);
+}
+
+static int dpm_wait_fn(struct device *dev, void *async_ptr)
+{
+	dpm_wait(dev, *((bool *)async_ptr));
+	return 0;
+}
+
+static void dpm_wait_for_children(struct device *dev, bool async)
+{
+	device_for_each_child(dev, &async, dpm_wait_fn);
+}
+
+/**
  * pm_op - Execute the PM operation appropriate for given PM event.
  * @dev: Device to handle.
  * @ops: PM operations to choose from.
@@ -271,8 +300,9 @@ static int pm_noirq_op(struct device *dev,
 	ktime_t calltime, delta, rettime;
 
 	if (initcall_debug) {
-		pr_info("calling %s_i+ @ %i\n",
-			dev_name(dev), task_pid_nr(current));
+		pr_info("calling %s+ @ %i, parent: %s\n",
+			dev_name(dev), task_pid_nr(current),
+			dev->parent ? dev_name(dev->parent) : "none");
 		calltime = ktime_get();
 	}
 
@@ -468,16 +498,20 @@ static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
  * device_resume - Execute "resume" callbacks for given device.
  * @dev: Device to handle.
  * @state: PM transition of the system being carried out.
+ * @async: If true, the device is being resumed asynchronously.
  */
-static int device_resume(struct device *dev, pm_message_t state)
+static int device_resume(struct device *dev, pm_message_t state, bool async)
 {
 	int error = 0;
 
 	TRACE_DEVICE(dev);
 	TRACE_RESUME(0);
 
+	dpm_wait(dev->parent, async);
 	down(&dev->sem);
 
+	dev->power.status = DPM_RESUMING;
+
 	if (dev->bus) {
 		if (dev->bus->pm) {
 			pm_dev_dbg(dev, state, "");
@@ -510,11 +544,29 @@ static int device_resume(struct device *dev, pm_message_t state)
 	}
  End:
 	up(&dev->sem);
+	complete_all(&dev->power.completion);
 
 	TRACE_RESUME(error);
 	return error;
 }
 
+static void async_resume(void *data, async_cookie_t cookie)
+{
+	struct device *dev = (struct device *)data;
+	int error;
+
+	error = device_resume(dev, pm_transition, true);
+	if (error)
+		pm_dev_err(dev, pm_transition, " async", error);
+	put_device(dev);
+}
+
+static bool is_async(struct device *dev)
+{
+	return dev->power.async_suspend && pm_async_enabled
+		&& !pm_trace_is_enabled();
+}
+
 /**
  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
  * @state: PM transition of the system being carried out.
@@ -525,21 +577,33 @@ static int device_resume(struct device *dev, pm_message_t state)
 static void dpm_resume(pm_message_t state)
 {
 	struct list_head list;
+	struct device *dev;
 	ktime_t starttime = ktime_get();
 
 	INIT_LIST_HEAD(&list);
 	mutex_lock(&dpm_list_mtx);
-	while (!list_empty(&dpm_list)) {
-		struct device *dev = to_device(dpm_list.next);
+	pm_transition = state;
+
+	list_for_each_entry(dev, &dpm_list, power.entry) {
+		if (dev->power.status < DPM_OFF)
+			continue;
+
+		INIT_COMPLETION(dev->power.completion);
+		if (is_async(dev)) {
+			get_device(dev);
+			async_schedule(async_resume, dev);
+		}
+	}
 
+	while (!list_empty(&dpm_list)) {
+		dev = to_device(dpm_list.next);
 		get_device(dev);
-		if (dev->power.status >= DPM_OFF) {
+		if (dev->power.status >= DPM_OFF && !is_async(dev)) {
 			int error;
 
-			dev->power.status = DPM_RESUMING;
 			mutex_unlock(&dpm_list_mtx);
 
-			error = device_resume(dev, state);
+			error = device_resume(dev, state, false);
 
 			mutex_lock(&dpm_list_mtx);
 			if (error)
@@ -554,6 +618,7 @@ static void dpm_resume(pm_message_t state)
 	}
 	list_splice(&list, &dpm_list);
 	mutex_unlock(&dpm_list_mtx);
+	async_synchronize_full();
 	dpm_show_time(starttime, state, NULL);
 }
 
@@ -731,17 +796,24 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
 	return error;
 }
 
+static int async_error;
+
 /**
  * device_suspend - Execute "suspend" callbacks for given device.
  * @dev: Device to handle.
  * @state: PM transition of the system being carried out.
+ * @async: If true, the device is being suspended asynchronously.
  */
-static int device_suspend(struct device *dev, pm_message_t state)
+static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 {
 	int error = 0;
 
+	dpm_wait_for_children(dev, async);
 	down(&dev->sem);
 
+	if (async_error)
+		goto End;
+
 	if (dev->class) {
 		if (dev->class->pm) {
 			pm_dev_dbg(dev, state, "class ");
@@ -772,12 +844,44 @@ static int device_suspend(struct device *dev, pm_message_t state)
 			error = legacy_suspend(dev, state, dev->bus->suspend);
 		}
 	}
+
+	if (!error)
+		dev->power.status = DPM_OFF;
+
  End:
 	up(&dev->sem);
+	complete_all(&dev->power.completion);
 
 	return error;
 }
 
+static void async_suspend(void *data, async_cookie_t cookie)
+{
+	struct device *dev = (struct device *)data;
+	int error;
+
+	error = __device_suspend(dev, pm_transition, true);
+	if (error) {
+		pm_dev_err(dev, pm_transition, " async", error);
+		async_error = error;
+	}
+
+	put_device(dev);
+}
+
+static int device_suspend(struct device *dev)
+{
+	INIT_COMPLETION(dev->power.completion);
+
+	if (pm_async_enabled && dev->power.async_suspend) {
+		get_device(dev);
+		async_schedule(async_suspend, dev);
+		return 0;
+	}
+
+	return __device_suspend(dev, pm_transition, false);
+}
+
 /**
  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
  * @state: PM transition of the system being carried out.
@@ -790,13 +894,15 @@ static int dpm_suspend(pm_message_t state)
 
 	INIT_LIST_HEAD(&list);
 	mutex_lock(&dpm_list_mtx);
+	pm_transition = state;
+	async_error = 0;
 	while (!list_empty(&dpm_list)) {
 		struct device *dev = to_device(dpm_list.prev);
 
 		get_device(dev);
 		mutex_unlock(&dpm_list_mtx);
 
-		error = device_suspend(dev, state);
+		error = device_suspend(dev);
 
 		mutex_lock(&dpm_list_mtx);
 		if (error) {
@@ -804,13 +910,17 @@ static int dpm_suspend(pm_message_t state)
 			put_device(dev);
 			break;
 		}
-		dev->power.status = DPM_OFF;
 		if (!list_empty(&dev->power.entry))
 			list_move(&dev->power.entry, &list);
 		put_device(dev);
+		if (async_error)
+			break;
 	}
 	list_splice(&list, dpm_list.prev);
 	mutex_unlock(&dpm_list_mtx);
+	async_synchronize_full();
+	if (!error)
+		error = async_error;
 	if (!error)
 		dpm_show_time(starttime, state, NULL);
 	return error;
@@ -936,3 +1046,14 @@ void __suspend_report_result(const char *function, void *fn, int ret)
 		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
 }
 EXPORT_SYMBOL_GPL(__suspend_report_result);
+
+/**
+ * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
+ * @dev: Device to wait for.
+ * @subordinate: Device that needs to wait for @dev.
+ */
+void device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
+{
+	dpm_wait(dev, subordinate->power.async_suspend);
+}
+EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
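Of the helpers added above, two are intended to be called from driver or bus code: device_enable_async_suspend() (also exercised by the new sysfs "async" attribute further down) marks a device as safe to suspend and resume in an async thread, and device_pm_wait_for_dev() lets a PM callback wait for a device outside its parent/child chain. The sketch below is illustrative only and not part of the patch; the "foo" driver, its callbacks and foo_other_dev are made-up names.

/* Illustrative sketch only, not part of the patch. */
#include <linux/device.h>
#include <linux/pm.h>

/* Hypothetical device, outside foo's parent/child chain, that must resume first. */
static struct device *foo_other_dev;

static int foo_probe(struct device *dev)
{
	/*
	 * Allow the PM core to handle this device in an async thread during
	 * system sleep transitions.  This is only safe because the off-tree
	 * dependency is declared explicitly in foo_resume() below.
	 */
	device_enable_async_suspend(dev);
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* Block until foo_other_dev has completed its resume. */
	device_pm_wait_for_dev(dev, foo_other_dev);
	/* ... reprogram the hardware ... */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.resume = foo_resume,
};

The pairing matters because async ordering is only guaranteed along the parent/child tree (via dpm_wait() and dpm_wait_for_children()); any other ordering requirement has to be stated explicitly with device_pm_wait_for_dev().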
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index b8fa1aa5225a..c0bd03c83b9c 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -12,10 +12,10 @@ static inline void pm_runtime_remove(struct device *dev) {}
 
 #ifdef CONFIG_PM_SLEEP
 
-/*
- * main.c
- */
+/* kernel/power/main.c */
+extern int pm_async_enabled;
 
+/* drivers/base/power/main.c */
 extern struct list_head dpm_list;	/* The active device list */
 
 static inline struct device *to_device(struct list_head *entry)
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index f8b044e8aef7..626dd147b75f 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1011,6 +1011,50 @@ void pm_runtime_enable(struct device *dev)
 EXPORT_SYMBOL_GPL(pm_runtime_enable);
 
 /**
+ * pm_runtime_forbid - Block run-time PM of a device.
+ * @dev: Device to handle.
+ *
+ * Increase the device's usage count and clear its power.runtime_auto flag,
+ * so that it cannot be suspended at run time until pm_runtime_allow() is called
+ * for it.
+ */
+void pm_runtime_forbid(struct device *dev)
+{
+	spin_lock_irq(&dev->power.lock);
+	if (!dev->power.runtime_auto)
+		goto out;
+
+	dev->power.runtime_auto = false;
+	atomic_inc(&dev->power.usage_count);
+	__pm_runtime_resume(dev, false);
+
+ out:
+	spin_unlock_irq(&dev->power.lock);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_forbid);
+
+/**
+ * pm_runtime_allow - Unblock run-time PM of a device.
+ * @dev: Device to handle.
+ *
+ * Decrease the device's usage count and set its power.runtime_auto flag.
+ */
+void pm_runtime_allow(struct device *dev)
+{
+	spin_lock_irq(&dev->power.lock);
+	if (dev->power.runtime_auto)
+		goto out;
+
+	dev->power.runtime_auto = true;
+	if (atomic_dec_and_test(&dev->power.usage_count))
+		__pm_runtime_idle(dev);
+
+ out:
+	spin_unlock_irq(&dev->power.lock);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_allow);
+
+/**
  * pm_runtime_init - Initialize run-time PM fields in given device object.
  * @dev: Device object to initialize.
  */
@@ -1028,6 +1072,7 @@ void pm_runtime_init(struct device *dev)
 
 	atomic_set(&dev->power.child_count, 0);
 	pm_suspend_ignore_children(dev, false);
+	dev->power.runtime_auto = true;
 
 	dev->power.request_pending = false;
 	dev->power.request = RPM_REQ_NONE;
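pm_runtime_forbid() and pm_runtime_allow() are the kernel-side equivalents of writing "on" and "auto" to the power/control attribute added in sysfs.c below, and drivers may also call them directly, for instance to keep a device powered until user space explicitly opts in to runtime PM. The sketch below is illustrative only; the "bar" driver is hypothetical, while pm_runtime_enable() and pm_runtime_disable() are pre-existing runtime PM helpers.

/* Illustrative sketch only, not part of the patch. */
#include <linux/device.h>
#include <linux/pm_runtime.h>

static int bar_probe(struct device *dev)
{
	pm_runtime_enable(dev);

	/*
	 * Take a usage count and clear power.runtime_auto: the device is not
	 * runtime-suspended and power/control reads "on" until
	 * pm_runtime_allow() runs, typically via "echo auto > power/control".
	 */
	pm_runtime_forbid(dev);
	return 0;
}

static int bar_remove(struct device *dev)
{
	/* Drops the usage count taken by pm_runtime_forbid(), if still held. */
	pm_runtime_allow(dev);
	pm_runtime_disable(dev);
	return 0;
}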
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 596aeecfdffe..86fd9373447e 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -4,9 +4,25 @@
 
 #include <linux/device.h>
 #include <linux/string.h>
+#include <linux/pm_runtime.h>
 #include "power.h"
 
 /*
+ *	control - Report/change current runtime PM setting of the device
+ *
+ *	Runtime power management of a device can be blocked with the help of
+ *	this attribute. All devices have one of the following two values for
+ *	the power/control file:
+ *
+ *	 + "auto\n" to allow the device to be power managed at run time;
+ *	 + "on\n" to prevent the device from being power managed at run time;
+ *
+ *	The default for all devices is "auto", which means that devices may be
+ *	subject to automatic power management, depending on their drivers.
+ *	Changing this attribute to "on" prevents the driver from power managing
+ *	the device at run time. Doing that while the device is suspended causes
+ *	it to be woken up.
+ *
  *	wakeup - Report/change current wakeup option for device
  *
  *	Some devices support "wakeup" events, which are hardware signals
@@ -38,11 +54,61 @@
  *	wakeup events internally (unless they are disabled), keeping
  *	their hardware in low power modes whenever they're unused. This
  *	saves runtime power, without requiring system-wide sleep states.
+ *
+ *	async - Report/change current async suspend setting for the device
+ *
+ *	Asynchronous suspend and resume of the device during system-wide power
+ *	state transitions can be enabled by writing "enabled" to this file.
+ *	Analogously, if "disabled" is written to this file, the device will be
+ *	suspended and resumed synchronously.
+ *
+ *	All devices have one of the following two values for power/async:
+ *
+ *	 + "enabled\n" to permit the asynchronous suspend/resume of the device;
+ *	 + "disabled\n" to forbid it;
+ *
+ *	NOTE: It generally is unsafe to permit the asynchronous suspend/resume
+ *	of a device unless it is certain that all of the PM dependencies of the
+ *	device are known to the PM core. However, for some devices this
+ *	attribute is set to "enabled" by bus type code or device drivers and in
+ *	that cases it should be safe to leave the default value.
  */
 
 static const char enabled[] = "enabled";
 static const char disabled[] = "disabled";
 
+#ifdef CONFIG_PM_RUNTIME
+static const char ctrl_auto[] = "auto";
+static const char ctrl_on[] = "on";
+
+static ssize_t control_show(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	return sprintf(buf, "%s\n",
+			dev->power.runtime_auto ? ctrl_auto : ctrl_on);
+}
+
+static ssize_t control_store(struct device * dev, struct device_attribute *attr,
+			     const char * buf, size_t n)
+{
+	char *cp;
+	int len = n;
+
+	cp = memchr(buf, '\n', n);
+	if (cp)
+		len = cp - buf;
+	if (len == sizeof ctrl_auto - 1 && strncmp(buf, ctrl_auto, len) == 0)
+		pm_runtime_allow(dev);
+	else if (len == sizeof ctrl_on - 1 && strncmp(buf, ctrl_on, len) == 0)
+		pm_runtime_forbid(dev);
+	else
+		return -EINVAL;
+	return n;
+}
+
+static DEVICE_ATTR(control, 0644, control_show, control_store);
+#endif
+
 static ssize_t
 wake_show(struct device * dev, struct device_attribute *attr, char * buf)
 {
@@ -77,9 +143,43 @@ wake_store(struct device * dev, struct device_attribute *attr,
 
 static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store);
 
+#ifdef CONFIG_PM_SLEEP_ADVANCED_DEBUG
+static ssize_t async_show(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	return sprintf(buf, "%s\n",
+			device_async_suspend_enabled(dev) ? enabled : disabled);
+}
+
+static ssize_t async_store(struct device *dev, struct device_attribute *attr,
+			   const char *buf, size_t n)
+{
+	char *cp;
+	int len = n;
+
+	cp = memchr(buf, '\n', n);
+	if (cp)
+		len = cp - buf;
+	if (len == sizeof enabled - 1 && strncmp(buf, enabled, len) == 0)
+		device_enable_async_suspend(dev);
+	else if (len == sizeof disabled - 1 && strncmp(buf, disabled, len) == 0)
+		device_disable_async_suspend(dev);
+	else
+		return -EINVAL;
+	return n;
+}
+
+static DEVICE_ATTR(async, 0644, async_show, async_store);
+#endif /* CONFIG_PM_SLEEP_ADVANCED_DEBUG */
 
 static struct attribute * power_attrs[] = {
+#ifdef CONFIG_PM_RUNTIME
+	&dev_attr_control.attr,
+#endif
 	&dev_attr_wakeup.attr,
+#ifdef CONFIG_PM_SLEEP_ADVANCED_DEBUG
+	&dev_attr_async.attr,
+#endif
 	NULL,
 };
 static struct attribute_group pm_attr_group = {
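From user space, the new attributes appear as plain text files in each device's power/ directory: power/control accepts "auto" or "on", and, when CONFIG_PM_SLEEP_ADVANCED_DEBUG is set, power/async accepts "enabled" or "disabled". A small illustrative user-space program follows; the device path is made up, and the strings are exactly those parsed by control_store() above.

/* Illustrative user-space sketch only, not part of the patch. */
#include <stdio.h>

int main(void)
{
	/* Hypothetical device; any device directory under /sys/devices works. */
	const char *path = "/sys/devices/pci0000:00/0000:00:1d.0/power/control";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	/* "auto" ends up in pm_runtime_allow(); "on" in pm_runtime_forbid(). */
	fputs("auto\n", f);
	fclose(f);
	return 0;
}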