Diffstat (limited to 'drivers/base')
-rw-r--r--	drivers/base/core.c          |   2
-rw-r--r--	drivers/base/power/Makefile  |   2
-rw-r--r--	drivers/base/power/main.c    |   1
-rw-r--r--	drivers/base/power/runtime.c |  54
-rw-r--r--	drivers/base/power/sysfs.c   |  98
-rw-r--r--	drivers/base/power/wakeup.c  | 247
6 files changed, 375 insertions, 29 deletions
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 38bbbd029306..f8e72724dd4b 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -673,7 +673,7 @@ static struct kobject *get_device_parent(struct device *dev,
 	 */
 	if (parent == NULL)
 		parent_kobj = virtual_device_parent(dev);
-	else if (parent->class)
+	else if (parent->class && !dev->class->ns_type)
 		return &parent->kobj;
 	else
 		parent_kobj = &parent->kobj;
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 89de75325cea..cbccf9a3cee4 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -1,5 +1,5 @@
 obj-$(CONFIG_PM)	+= sysfs.o
-obj-$(CONFIG_PM_SLEEP)	+= main.o
+obj-$(CONFIG_PM_SLEEP)	+= main.o wakeup.o
 obj-$(CONFIG_PM_RUNTIME)	+= runtime.o
 obj-$(CONFIG_PM_OPS)	+= generic_ops.o
 obj-$(CONFIG_PM_TRACE_RTC)	+= trace.o
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 941fcb87e52a..5419a49ff135 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -59,6 +59,7 @@ void device_pm_init(struct device *dev)
 {
 	dev->power.status = DPM_ON;
 	init_completion(&dev->power.completion);
+	dev->power.wakeup_count = 0;
 	pm_runtime_init(dev);
 }
 
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index b0ec0e9f27e9..b78c401ffa73 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -123,6 +123,45 @@ int pm_runtime_idle(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(pm_runtime_idle);
 
+
+/**
+ * update_pm_runtime_accounting - Update the time accounting of power states
+ * @dev: Device to update the accounting for
+ *
+ * In order to be able to have time accounting of the various power states
+ * (as used by programs such as PowerTOP to show the effectiveness of runtime
+ * PM), we need to track the time spent in each state.
+ * update_pm_runtime_accounting must be called each time before the
+ * runtime_status field is updated, to account the time in the old state
+ * correctly.
+ */
+void update_pm_runtime_accounting(struct device *dev)
+{
+	unsigned long now = jiffies;
+	int delta;
+
+	delta = now - dev->power.accounting_timestamp;
+
+	if (delta < 0)
+		delta = 0;
+
+	dev->power.accounting_timestamp = now;
+
+	if (dev->power.disable_depth > 0)
+		return;
+
+	if (dev->power.runtime_status == RPM_SUSPENDED)
+		dev->power.suspended_jiffies += delta;
+	else
+		dev->power.active_jiffies += delta;
+}
+
+static void __update_runtime_status(struct device *dev, enum rpm_status status)
+{
+	update_pm_runtime_accounting(dev);
+	dev->power.runtime_status = status;
+}
+
 /**
  * __pm_runtime_suspend - Carry out run-time suspend of given device.
  * @dev: Device to suspend.
@@ -197,7 +236,7 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
 		goto repeat;
 	}
 
-	dev->power.runtime_status = RPM_SUSPENDING;
+	__update_runtime_status(dev, RPM_SUSPENDING);
 	dev->power.deferred_resume = false;
 
 	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) {
@@ -228,7 +267,7 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
 	}
 
 	if (retval) {
-		dev->power.runtime_status = RPM_ACTIVE;
+		__update_runtime_status(dev, RPM_ACTIVE);
 		if (retval == -EAGAIN || retval == -EBUSY) {
 			if (dev->power.timer_expires == 0)
 				notify = true;
@@ -237,7 +276,7 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
 			pm_runtime_cancel_pending(dev);
 		}
 	} else {
-		dev->power.runtime_status = RPM_SUSPENDED;
+		__update_runtime_status(dev, RPM_SUSPENDED);
 		pm_runtime_deactivate_timer(dev);
 
 		if (dev->parent) {
@@ -381,7 +420,7 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
 		goto repeat;
 	}
 
-	dev->power.runtime_status = RPM_RESUMING;
+	__update_runtime_status(dev, RPM_RESUMING);
 
 	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) {
 		spin_unlock_irq(&dev->power.lock);
@@ -411,10 +450,10 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
 	}
 
 	if (retval) {
-		dev->power.runtime_status = RPM_SUSPENDED;
+		__update_runtime_status(dev, RPM_SUSPENDED);
 		pm_runtime_cancel_pending(dev);
 	} else {
-		dev->power.runtime_status = RPM_ACTIVE;
+		__update_runtime_status(dev, RPM_ACTIVE);
 		if (parent)
 			atomic_inc(&parent->power.child_count);
 	}
@@ -848,7 +887,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
 	}
 
  out_set:
-	dev->power.runtime_status = status;
+	__update_runtime_status(dev, status);
 	dev->power.runtime_error = 0;
  out:
 	spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -1077,6 +1116,7 @@ void pm_runtime_init(struct device *dev)
 	dev->power.request_pending = false;
 	dev->power.request = RPM_REQ_NONE;
 	dev->power.deferred_resume = false;
+	dev->power.accounting_timestamp = jiffies;
 	INIT_WORK(&dev->power.work, pm_runtime_work);
 
 	dev->power.timer_expires = 0;
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index a4c33bc51257..e56b4388fe61 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -6,6 +6,7 @@
 #include <linux/string.h>
 #include <linux/pm_runtime.h>
 #include <asm/atomic.h>
+#include <linux/jiffies.h>
 #include "power.h"
 
 /*
@@ -73,6 +74,8 @@
 *	device are known to the PM core. However, for some devices this
 *	attribute is set to "enabled" by bus type code or device drivers and in
 *	that cases it should be safe to leave the default value.
+ *
+ * wakeup_count - Report the number of wakeup events related to the device
 */
 
 static const char enabled[] = "enabled";
@@ -108,6 +111,65 @@ static ssize_t control_store(struct device * dev, struct device_attribute *attr,
 }
 
 static DEVICE_ATTR(control, 0644, control_show, control_store);
+
+static ssize_t rtpm_active_time_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int ret;
+	spin_lock_irq(&dev->power.lock);
+	update_pm_runtime_accounting(dev);
+	ret = sprintf(buf, "%i\n", jiffies_to_msecs(dev->power.active_jiffies));
+	spin_unlock_irq(&dev->power.lock);
+	return ret;
+}
+
+static DEVICE_ATTR(runtime_active_time, 0444, rtpm_active_time_show, NULL);
+
+static ssize_t rtpm_suspended_time_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int ret;
+	spin_lock_irq(&dev->power.lock);
+	update_pm_runtime_accounting(dev);
+	ret = sprintf(buf, "%i\n",
+		jiffies_to_msecs(dev->power.suspended_jiffies));
+	spin_unlock_irq(&dev->power.lock);
+	return ret;
+}
+
+static DEVICE_ATTR(runtime_suspended_time, 0444, rtpm_suspended_time_show, NULL);
+
+static ssize_t rtpm_status_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	const char *p;
+
+	if (dev->power.runtime_error) {
+		p = "error\n";
+	} else if (dev->power.disable_depth) {
+		p = "unsupported\n";
+	} else {
+		switch (dev->power.runtime_status) {
+		case RPM_SUSPENDED:
+			p = "suspended\n";
+			break;
+		case RPM_SUSPENDING:
+			p = "suspending\n";
+			break;
+		case RPM_RESUMING:
+			p = "resuming\n";
+			break;
+		case RPM_ACTIVE:
+			p = "active\n";
+			break;
+		default:
+			return -EIO;
+		}
+	}
+	return sprintf(buf, p);
+}
+
+static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
 #endif
 
 static ssize_t
@@ -144,6 +206,16 @@ wake_store(struct device * dev, struct device_attribute *attr,
 
 static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store);
 
+#ifdef CONFIG_PM_SLEEP
+static ssize_t wakeup_count_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%lu\n", dev->power.wakeup_count);
+}
+
+static DEVICE_ATTR(wakeup_count, 0444, wakeup_count_show, NULL);
+#endif
+
 #ifdef CONFIG_PM_ADVANCED_DEBUG
 #ifdef CONFIG_PM_RUNTIME
 
@@ -172,27 +244,8 @@ static ssize_t rtpm_enabled_show(struct device *dev,
 	return sprintf(buf, "enabled\n");
 }
 
-static ssize_t rtpm_status_show(struct device *dev,
-				struct device_attribute *attr, char *buf)
-{
-	if (dev->power.runtime_error)
-		return sprintf(buf, "error\n");
-	switch (dev->power.runtime_status) {
-	case RPM_SUSPENDED:
-		return sprintf(buf, "suspended\n");
-	case RPM_SUSPENDING:
-		return sprintf(buf, "suspending\n");
-	case RPM_RESUMING:
-		return sprintf(buf, "resuming\n");
-	case RPM_ACTIVE:
-		return sprintf(buf, "active\n");
-	}
-	return -EIO;
-}
-
 static DEVICE_ATTR(runtime_usage, 0444, rtpm_usagecount_show, NULL);
 static DEVICE_ATTR(runtime_active_kids, 0444, rtpm_children_show, NULL);
-static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
 static DEVICE_ATTR(runtime_enabled, 0444, rtpm_enabled_show, NULL);
 
 #endif
@@ -228,14 +281,19 @@ static DEVICE_ATTR(async, 0644, async_show, async_store);
 static struct attribute * power_attrs[] = {
 #ifdef CONFIG_PM_RUNTIME
 	&dev_attr_control.attr,
+	&dev_attr_runtime_status.attr,
+	&dev_attr_runtime_suspended_time.attr,
+	&dev_attr_runtime_active_time.attr,
 #endif
 	&dev_attr_wakeup.attr,
+#ifdef CONFIG_PM_SLEEP
+	&dev_attr_wakeup_count.attr,
+#endif
 #ifdef CONFIG_PM_ADVANCED_DEBUG
 	&dev_attr_async.attr,
 #ifdef CONFIG_PM_RUNTIME
 	&dev_attr_runtime_usage.attr,
 	&dev_attr_runtime_active_kids.attr,
-	&dev_attr_runtime_status.attr,
 	&dev_attr_runtime_enabled.attr,
 #endif
 #endif
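The attributes added above show up under each device's power/ directory in sysfs. As a rough illustration (not part of this patch), a small user-space program could read them like this; the device path below is hypothetical and must be replaced with a real sysfs device path:

#include <stdio.h>

/* Hypothetical example: dump the new power/ attributes of one device. */
static void show_attr(const char *attr)
{
	char path[256], buf[64];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/pci0000:00/0000:00:1d.0/power/%s", attr);
	f = fopen(path, "r");
	if (!f)
		return;	/* attribute may be absent, e.g. without CONFIG_PM_RUNTIME */
	if (fgets(buf, sizeof(buf), f))
		printf("%-24s %s", attr, buf);
	fclose(f);
}

int main(void)
{
	show_attr("runtime_status");		/* active/suspended/suspending/resuming */
	show_attr("runtime_active_time");	/* time spent RPM_ACTIVE, in ms */
	show_attr("runtime_suspended_time");	/* time spent RPM_SUSPENDED, in ms */
	show_attr("wakeup_count");		/* wakeup events related to the device */
	return 0;
}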
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
new file mode 100644
index 000000000000..eb594facfc3f
--- /dev/null
+++ b/drivers/base/power/wakeup.c
@@ -0,0 +1,247 @@
+/*
+ * drivers/base/power/wakeup.c - System wakeup events framework
+ *
+ * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/capability.h>
+#include <linux/suspend.h>
+#include <linux/pm.h>
+
+/*
+ * If set, the suspend/hibernate code will abort transitions to a sleep state
+ * if wakeup events are registered during or immediately before the transition.
+ */
+bool events_check_enabled;
+
+/* The counter of registered wakeup events. */
+static unsigned long event_count;
+/* A preserved old value of event_count. */
+static unsigned long saved_event_count;
+/* The counter of wakeup events being processed. */
+static unsigned long events_in_progress;
+
+static DEFINE_SPINLOCK(events_lock);
+
+static void pm_wakeup_timer_fn(unsigned long data);
+
+static DEFINE_TIMER(events_timer, pm_wakeup_timer_fn, 0, 0);
+static unsigned long events_timer_expires;
+
+/*
+ * The functions below use the observation that each wakeup event starts a
+ * period in which the system should not be suspended.  The moment this period
+ * will end depends on how the wakeup event is going to be processed after being
+ * detected and all of the possible cases can be divided into two distinct
+ * groups.
+ *
+ * First, a wakeup event may be detected by the same functional unit that will
+ * carry out the entire processing of it and possibly will pass it to user space
+ * for further processing.  In that case the functional unit that has detected
+ * the event may later "close" the "no suspend" period associated with it
+ * directly as soon as it has been dealt with.  The pair of pm_stay_awake() and
+ * pm_relax(), balanced with each other, is supposed to be used in such
+ * situations.
+ *
+ * Second, a wakeup event may be detected by one functional unit and processed
+ * by another one.  In that case the unit that has detected it cannot really
+ * "close" the "no suspend" period associated with it, unless it knows in
+ * advance what's going to happen to the event during processing.  This
+ * knowledge, however, may not be available to it, so it can simply specify time
+ * to wait before the system can be suspended and pass it as the second
+ * argument of pm_wakeup_event().
+ */
+
+/**
+ * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
+ * @dev: Device the wakeup event is related to.
+ *
+ * Notify the PM core of a wakeup event (signaled by @dev) by incrementing the
+ * counter of wakeup events being processed.  If @dev is not NULL, the counter
+ * of wakeup events related to @dev is incremented too.
+ *
+ * Call this function after detecting of a wakeup event if pm_relax() is going
+ * to be called directly after processing the event (and possibly passing it to
+ * user space for further processing).
+ *
+ * It is safe to call this function from interrupt context.
+ */
+void pm_stay_awake(struct device *dev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&events_lock, flags);
+	if (dev)
+		dev->power.wakeup_count++;
+
+	events_in_progress++;
+	spin_unlock_irqrestore(&events_lock, flags);
+}
+
+/**
+ * pm_relax - Notify the PM core that processing of a wakeup event has ended.
+ *
+ * Notify the PM core that a wakeup event has been processed by decrementing
+ * the counter of wakeup events being processed and incrementing the counter
+ * of registered wakeup events.
+ *
+ * Call this function for wakeup events whose processing started with calling
+ * pm_stay_awake().
+ *
+ * It is safe to call it from interrupt context.
+ */
+void pm_relax(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&events_lock, flags);
+	if (events_in_progress) {
+		events_in_progress--;
+		event_count++;
+	}
+	spin_unlock_irqrestore(&events_lock, flags);
+}
+
+/**
+ * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
+ *
+ * Decrease the counter of wakeup events being processed after it was increased
+ * by pm_wakeup_event().
+ */
+static void pm_wakeup_timer_fn(unsigned long data)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&events_lock, flags);
+	if (events_timer_expires
+	    && time_before_eq(events_timer_expires, jiffies)) {
+		events_in_progress--;
+		events_timer_expires = 0;
+	}
+	spin_unlock_irqrestore(&events_lock, flags);
+}
+
+/**
+ * pm_wakeup_event - Notify the PM core of a wakeup event.
+ * @dev: Device the wakeup event is related to.
+ * @msec: Anticipated event processing time (in milliseconds).
+ *
+ * Notify the PM core of a wakeup event (signaled by @dev) that will take
+ * approximately @msec milliseconds to be processed by the kernel.  Increment
+ * the counter of registered wakeup events and (if @msec is nonzero) set up
+ * the wakeup events timer to execute pm_wakeup_timer_fn() in future (if the
+ * timer has not been set up already, increment the counter of wakeup events
+ * being processed).  If @dev is not NULL, the counter of wakeup events related
+ * to @dev is incremented too.
+ *
+ * It is safe to call this function from interrupt context.
+ */
+void pm_wakeup_event(struct device *dev, unsigned int msec)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&events_lock, flags);
+	event_count++;
+	if (dev)
+		dev->power.wakeup_count++;
+
+	if (msec) {
+		unsigned long expires;
+
+		expires = jiffies + msecs_to_jiffies(msec);
+		if (!expires)
+			expires = 1;
+
+		if (!events_timer_expires
+		    || time_after(expires, events_timer_expires)) {
+			if (!events_timer_expires)
+				events_in_progress++;
+
+			mod_timer(&events_timer, expires);
+			events_timer_expires = expires;
+		}
+	}
+	spin_unlock_irqrestore(&events_lock, flags);
+}
+
+/**
+ * pm_check_wakeup_events - Check for new wakeup events.
+ *
+ * Compare the current number of registered wakeup events with its preserved
+ * value from the past to check if new wakeup events have been registered since
+ * the old value was stored.  Check if the current number of wakeup events being
+ * processed is zero.
+ */
+bool pm_check_wakeup_events(void)
+{
+	unsigned long flags;
+	bool ret = true;
+
+	spin_lock_irqsave(&events_lock, flags);
+	if (events_check_enabled) {
+		ret = (event_count == saved_event_count) && !events_in_progress;
+		events_check_enabled = ret;
+	}
+	spin_unlock_irqrestore(&events_lock, flags);
+	return ret;
+}
+
+/**
+ * pm_get_wakeup_count - Read the number of registered wakeup events.
+ * @count: Address to store the value at.
+ *
+ * Store the number of registered wakeup events at the address in @count.  Block
+ * if the current number of wakeup events being processed is nonzero.
+ *
+ * Return false if the wait for the number of wakeup events being processed to
+ * drop down to zero has been interrupted by a signal (and the current number
+ * of wakeup events being processed is still nonzero).  Otherwise return true.
+ */
+bool pm_get_wakeup_count(unsigned long *count)
+{
+	bool ret;
+
+	spin_lock_irq(&events_lock);
+	if (capable(CAP_SYS_ADMIN))
+		events_check_enabled = false;
+
+	while (events_in_progress && !signal_pending(current)) {
+		spin_unlock_irq(&events_lock);
+
+		schedule_timeout_interruptible(msecs_to_jiffies(100));
+
+		spin_lock_irq(&events_lock);
+	}
+	*count = event_count;
+	ret = !events_in_progress;
+	spin_unlock_irq(&events_lock);
+	return ret;
+}
+
+/**
+ * pm_save_wakeup_count - Save the current number of registered wakeup events.
+ * @count: Value to compare with the current number of registered wakeup events.
+ *
+ * If @count is equal to the current number of registered wakeup events and the
+ * current number of wakeup events being processed is zero, store @count as the
+ * old number of registered wakeup events to be used by pm_check_wakeup_events()
+ * and return true.  Otherwise return false.
+ */
+bool pm_save_wakeup_count(unsigned long count)
+{
+	bool ret = false;
+
+	spin_lock_irq(&events_lock);
+	if (count == event_count && !events_in_progress) {
+		saved_event_count = count;
+		events_check_enabled = true;
+		ret = true;
+	}
+	spin_unlock_irq(&events_lock);
+	return ret;
+}
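The comment block at the top of wakeup.c describes two reporting styles for wakeup events. A minimal driver-side sketch of both is given below; it is only an illustration of the API introduced here, not code from the patch. The foo_* names are hypothetical, the ~100 ms processing window is an assumption, and the exact header exporting these helpers is outside this drivers/base-limited diff (linux/suspend.h is used as a guess).

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/suspend.h>

struct foo_device {
	struct device *dev;
};

/* Style 1: the detecting unit also finishes the processing itself, so it
 * brackets the "no suspend" period with pm_stay_awake()/pm_relax(). */
static irqreturn_t foo_irq_handler(int irq, void *data)
{
	struct foo_device *foo = data;

	pm_stay_awake(foo->dev);
	/* ... fully handle the wakeup event ... */
	pm_relax();

	return IRQ_HANDLED;
}

/* Style 2: the event is handed off for processing elsewhere, so the handler
 * only reports it with an estimated processing time (assumed 100 ms here). */
static irqreturn_t foo_irq_handler_deferred(int irq, void *data)
{
	struct foo_device *foo = data;

	pm_wakeup_event(foo->dev, 100);
	/* ... queue the event for another unit or user space ... */

	return IRQ_HANDLED;
}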