author		Rafael J. Wysocki <rjw@sisk.pl>	2012-05-11 15:15:09 -0400
committer	Rafael J. Wysocki <rjw@sisk.pl>	2012-05-11 15:15:09 -0400
commit		351520a9ebfdf2f36cd97c1192f280e0ac7fdcfc (patch)
tree		8f177d2c59be90ebcedc98ed508832a7ebbd7e0a
parent		e6d18093ea3d1d30a4de9e29cb1676c1f4b55147 (diff)
parent		4e585d25e120f1eae0a3a8bf8f6ebc7692afec18 (diff)
Merge branch 'pm-sleep'
* pm-sleep:
PM / Sleep: User space wakeup sources garbage collector Kconfig option
PM / Sleep: Make the limit of user space wakeup sources configurable
PM / Documentation: suspend-and-cpuhotplug.txt: Fix typo
PM / Sleep: Fix a mistake in a conditional in autosleep_store()
epoll: Add a flag, EPOLLWAKEUP, to prevent suspend while epoll events are ready
PM / Sleep: Add user space interface for manipulating wakeup sources, v3
PM / Sleep: Add "prevent autosleep time" statistics to wakeup sources
PM / Sleep: Implement opportunistic sleep, v2
PM / Sleep: Add wakeup_source_activate and wakeup_source_deactivate tracepoints
PM / Sleep: Change wakeup source statistics to follow Android
PM / Sleep: Use wait queue to signal "no wakeup events in progress"
PM / Sleep: Look for wakeup events in later stages of device suspend
PM / Hibernate: Hibernate/thaw fixes/improvements
-rw-r--r--	Documentation/ABI/testing/sysfs-devices-power	35
-rw-r--r--	Documentation/ABI/testing/sysfs-power	59
-rw-r--r--	Documentation/power/suspend-and-cpuhotplug.txt	2
-rw-r--r--	drivers/base/power/main.c	10
-rw-r--r--	drivers/base/power/sysfs.c	54
-rw-r--r--	drivers/base/power/wakeup.c	174
-rw-r--r--	fs/eventpoll.c	90
-rw-r--r--	include/linux/capability.h	5
-rw-r--r--	include/linux/eventpoll.h	12
-rw-r--r--	include/linux/pm_wakeup.h	15
-rw-r--r--	include/linux/suspend.h	14
-rw-r--r--	include/trace/events/power.h	34
-rw-r--r--	kernel/power/Kconfig	27
-rw-r--r--	kernel/power/Makefile	2
-rw-r--r--	kernel/power/autosleep.c	127
-rw-r--r--	kernel/power/main.c	160
-rw-r--r--	kernel/power/power.h	27
-rw-r--r--	kernel/power/swap.c	62
-rw-r--r--	kernel/power/wakelock.c	259
19 files changed, 1050 insertions(+), 118 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-devices-power b/Documentation/ABI/testing/sysfs-devices-power
index 840f7d64d483..45000f0db4d4 100644
--- a/Documentation/ABI/testing/sysfs-devices-power
+++ b/Documentation/ABI/testing/sysfs-devices-power
@@ -96,16 +96,26 @@ Description: | |||
96 | is read-only. If the device is not enabled to wake up the | 96 | is read-only. If the device is not enabled to wake up the |
97 | system from sleep states, this attribute is not present. | 97 | system from sleep states, this attribute is not present. |
98 | 98 | ||
99 | What: /sys/devices/.../power/wakeup_hit_count | 99 | What: /sys/devices/.../power/wakeup_abort_count |
100 | Date: September 2010 | 100 | Date: February 2012 |
101 | Contact: Rafael J. Wysocki <rjw@sisk.pl> | 101 | Contact: Rafael J. Wysocki <rjw@sisk.pl> |
102 | Description: | 102 | Description: |
103 | The /sys/devices/.../wakeup_hit_count attribute contains the | 103 | The /sys/devices/.../wakeup_abort_count attribute contains the |
104 | number of times the processing of a wakeup event associated with | 104 | number of times the processing of a wakeup event associated with |
105 | the device might prevent the system from entering a sleep state. | 105 | the device might have aborted system transition into a sleep |
106 | This attribute is read-only. If the device is not enabled to | 106 | state in progress. This attribute is read-only. If the device |
107 | wake up the system from sleep states, this attribute is not | 107 | is not enabled to wake up the system from sleep states, this |
108 | present. | 108 | attribute is not present. |
109 | |||
110 | What: /sys/devices/.../power/wakeup_expire_count | ||
111 | Date: February 2012 | ||
112 | Contact: Rafael J. Wysocki <rjw@sisk.pl> | ||
113 | Description: | ||
114 | The /sys/devices/.../wakeup_expire_count attribute contains the | ||
115 | number of times a wakeup event associated with the device has | ||
116 | been reported with a timeout that expired. This attribute is | ||
117 | read-only. If the device is not enabled to wake up the system | ||
118 | from sleep states, this attribute is not present. | ||
109 | 119 | ||
110 | What: /sys/devices/.../power/wakeup_active | 120 | What: /sys/devices/.../power/wakeup_active |
111 | Date: September 2010 | 121 | Date: September 2010 |
@@ -148,6 +158,17 @@ Description: | |||
148 | not enabled to wake up the system from sleep states, this | 158 | not enabled to wake up the system from sleep states, this |
149 | attribute is not present. | 159 | attribute is not present. |
150 | 160 | ||
161 | What: /sys/devices/.../power/wakeup_prevent_sleep_time_ms | ||
162 | Date: February 2012 | ||
163 | Contact: Rafael J. Wysocki <rjw@sisk.pl> | ||
164 | Description: | ||
165 | The /sys/devices/.../wakeup_prevent_sleep_time_ms attribute | ||
166 | contains the total time the device has been preventing | ||
167 | opportunistic transitions to sleep states from occurring. | ||
168 | This attribute is read-only. If the device is not enabled to | ||
169 | wake up the system from sleep states, this attribute is not | ||
170 | present. | ||
171 | |||
151 | What: /sys/devices/.../power/autosuspend_delay_ms | 172 | What: /sys/devices/.../power/autosuspend_delay_ms |
152 | Date: September 2010 | 173 | Date: September 2010 |
153 | Contact: Alan Stern <stern@rowland.harvard.edu> | 174 | Contact: Alan Stern <stern@rowland.harvard.edu> |
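
The new per-device attributes documented above (wakeup_abort_count, wakeup_expire_count and wakeup_prevent_sleep_time_ms) are ordinary read-only sysfs files, so they can be sampled with plain reads. A minimal user-space sketch in C follows; the device path is a hypothetical example, and an empty line from an attribute means the device has no wakeup source registered:

/* Sketch: dump the wakeup statistics added by this series for one device.
 * The device path below is illustrative only; substitute a device that
 * actually has power/wakeup support.
 */
#include <stdio.h>

static void show(const char *dev, const char *attr)
{
	char path[256], buf[64];
	FILE *f;

	snprintf(path, sizeof(path), "%s/power/%s", dev, attr);
	f = fopen(path, "r");
	if (!f) {
		printf("%-32s <not present>\n", attr);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%-32s %s", attr, buf);
	fclose(f);
}

int main(void)
{
	const char *dev = "/sys/devices/platform/example-dev"; /* hypothetical */

	show(dev, "wakeup_count");
	show(dev, "wakeup_abort_count");           /* new in this series */
	show(dev, "wakeup_expire_count");          /* new in this series */
	show(dev, "wakeup_prevent_sleep_time_ms"); /* CONFIG_PM_AUTOSLEEP only */
	return 0;
}
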
diff --git a/Documentation/ABI/testing/sysfs-power b/Documentation/ABI/testing/sysfs-power
index b464d12761ba..31725ffeeb3a 100644
--- a/Documentation/ABI/testing/sysfs-power
+++ b/Documentation/ABI/testing/sysfs-power
@@ -172,3 +172,62 @@ Description: | |||
172 | 172 | ||
173 | Reading from this file will display the current value, which is | 173 | Reading from this file will display the current value, which is |
174 | set to 1 MB by default. | 174 | set to 1 MB by default. |
175 | |||
176 | What: /sys/power/autosleep | ||
177 | Date: April 2012 | ||
178 | Contact: Rafael J. Wysocki <rjw@sisk.pl> | ||
179 | Description: | ||
180 | The /sys/power/autosleep file can be written one of the strings | ||
181 | returned by reads from /sys/power/state. If that happens, a | ||
182 | work item attempting to trigger a transition of the system to | ||
183 | the sleep state represented by that string is queued up. This | ||
184 | attempt will only succeed if there are no active wakeup sources | ||
185 | in the system at that time. After every execution, regardless | ||
186 | of whether or not the attempt to put the system to sleep has | ||
187 | succeeded, the work item requeues itself until user space | ||
188 | writes "off" to /sys/power/autosleep. | ||
189 | |||
190 | Reading from this file causes the last string successfully | ||
191 | written to it to be returned. | ||
192 | |||
193 | What: /sys/power/wake_lock | ||
194 | Date: February 2012 | ||
195 | Contact: Rafael J. Wysocki <rjw@sisk.pl> | ||
196 | Description: | ||
197 | The /sys/power/wake_lock file allows user space to create | ||
198 | wakeup source objects and activate them on demand (if one of | ||
199 | those wakeup sources is active, reads from the | ||
200 | /sys/power/wakeup_count file block or return false). When a | ||
201 | string without white space is written to /sys/power/wake_lock, | ||
202 | it will be assumed to represent a wakeup source name. If there | ||
203 | is a wakeup source object with that name, it will be activated | ||
204 | (unless active already). Otherwise, a new wakeup source object | ||
205 | will be registered, assigned the given name and activated. | ||
206 | If a string written to /sys/power/wake_lock contains white | ||
207 | space, the part of the string preceding the white space will be | ||
208 | regarded as a wakeup source name and handled as described above. | ||
209 | The other part of the string will be regarded as a timeout (in | ||
210 | nanoseconds) such that the wakeup source will be automatically | ||
211 | deactivated after it has expired. The timeout, if present, is | ||
212 | set regardless of the current state of the wakeup source object | ||
213 | in question. | ||
214 | |||
215 | Reads from this file return a string consisting of the names of | ||
216 | wakeup sources created with the help of it that are active at | ||
217 | the moment, separated with spaces. | ||
218 | |||
219 | |||
220 | What: /sys/power/wake_unlock | ||
221 | Date: February 2012 | ||
222 | Contact: Rafael J. Wysocki <rjw@sisk.pl> | ||
223 | Description: | ||
224 | The /sys/power/wake_unlock file allows user space to deactivate | ||
225 | wakeup sources created with the help of /sys/power/wake_lock. | ||
226 | When a string is written to /sys/power/wake_unlock, it will be | ||
227 | assumed to represent the name of a wakeup source to deactivate. | ||
228 | If a wakeup source object of that name exists and is active at | ||
229 | the moment, it will be deactivated. | ||
230 | |||
231 | Reads from this file return a string consisting of the names of | ||
232 | wakeup sources created with the help of /sys/power/wake_lock | ||
233 | that are inactive at the moment, separated with spaces. | ||
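
Taken together, the three files documented above give user space an Android-style opportunistic-suspend flow: hold a wake lock while work is pending, enable autosleep, and drop the lock when the work is done. The following is a minimal sketch in C, assuming the process has the privileges needed to write these files; the lock name, timeout and sleep state are illustrative values only:

/* Sketch of the user-space flow for the new autosleep/wake_lock interface.
 * Error handling is reduced to perror() for brevity.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_str(const char *path, const char *s)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0 || write(fd, s, strlen(s)) < 0) {
		perror(path);
		if (fd >= 0)
			close(fd);
		return -1;
	}
	close(fd);
	return 0;
}

int main(void)
{
	/* Create/activate a wakeup source; "1000000000" is a 1 s timeout in ns. */
	write_str("/sys/power/wake_lock", "my_app_lock 1000000000");

	/* Let the kernel suspend to RAM whenever no wakeup source is active. */
	write_str("/sys/power/autosleep", "mem");

	/* ... do the work that must not race with suspend ... */

	/* Deactivate the wakeup source explicitly (the timeout would also do it). */
	write_str("/sys/power/wake_unlock", "my_app_lock");
	return 0;
}
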
diff --git a/Documentation/power/suspend-and-cpuhotplug.txt b/Documentation/power/suspend-and-cpuhotplug.txt
index f28f9a6f0347..e13dafc8e8f1 100644
--- a/Documentation/power/suspend-and-cpuhotplug.txt
+++ b/Documentation/power/suspend-and-cpuhotplug.txt
@@ -29,7 +29,7 @@ More details follow: | |||
29 | 29 | ||
30 | Write 'mem' to | 30 | Write 'mem' to |
31 | /sys/power/state | 31 | /sys/power/state |
32 | syfs file | 32 | sysfs file |
33 | | | 33 | | |
34 | v | 34 | v |
35 | Acquire pm_mutex lock | 35 | Acquire pm_mutex lock |
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index b462c0e341cb..e0fb5b0435a3 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -889,6 +889,11 @@ static int dpm_suspend_noirq(pm_message_t state) | |||
889 | if (!list_empty(&dev->power.entry)) | 889 | if (!list_empty(&dev->power.entry)) |
890 | list_move(&dev->power.entry, &dpm_noirq_list); | 890 | list_move(&dev->power.entry, &dpm_noirq_list); |
891 | put_device(dev); | 891 | put_device(dev); |
892 | |||
893 | if (pm_wakeup_pending()) { | ||
894 | error = -EBUSY; | ||
895 | break; | ||
896 | } | ||
892 | } | 897 | } |
893 | mutex_unlock(&dpm_list_mtx); | 898 | mutex_unlock(&dpm_list_mtx); |
894 | if (error) | 899 | if (error) |
@@ -962,6 +967,11 @@ static int dpm_suspend_late(pm_message_t state) | |||
962 | if (!list_empty(&dev->power.entry)) | 967 | if (!list_empty(&dev->power.entry)) |
963 | list_move(&dev->power.entry, &dpm_late_early_list); | 968 | list_move(&dev->power.entry, &dpm_late_early_list); |
964 | put_device(dev); | 969 | put_device(dev); |
970 | |||
971 | if (pm_wakeup_pending()) { | ||
972 | error = -EBUSY; | ||
973 | break; | ||
974 | } | ||
965 | } | 975 | } |
966 | mutex_unlock(&dpm_list_mtx); | 976 | mutex_unlock(&dpm_list_mtx); |
967 | if (error) | 977 | if (error) |
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 95c12f6cb5b9..48be2ad4dd2c 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -314,22 +314,41 @@ static ssize_t wakeup_active_count_show(struct device *dev, | |||
314 | 314 | ||
315 | static DEVICE_ATTR(wakeup_active_count, 0444, wakeup_active_count_show, NULL); | 315 | static DEVICE_ATTR(wakeup_active_count, 0444, wakeup_active_count_show, NULL); |
316 | 316 | ||
317 | static ssize_t wakeup_hit_count_show(struct device *dev, | 317 | static ssize_t wakeup_abort_count_show(struct device *dev, |
318 | struct device_attribute *attr, char *buf) | 318 | struct device_attribute *attr, |
319 | char *buf) | ||
320 | { | ||
321 | unsigned long count = 0; | ||
322 | bool enabled = false; | ||
323 | |||
324 | spin_lock_irq(&dev->power.lock); | ||
325 | if (dev->power.wakeup) { | ||
326 | count = dev->power.wakeup->wakeup_count; | ||
327 | enabled = true; | ||
328 | } | ||
329 | spin_unlock_irq(&dev->power.lock); | ||
330 | return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); | ||
331 | } | ||
332 | |||
333 | static DEVICE_ATTR(wakeup_abort_count, 0444, wakeup_abort_count_show, NULL); | ||
334 | |||
335 | static ssize_t wakeup_expire_count_show(struct device *dev, | ||
336 | struct device_attribute *attr, | ||
337 | char *buf) | ||
319 | { | 338 | { |
320 | unsigned long count = 0; | 339 | unsigned long count = 0; |
321 | bool enabled = false; | 340 | bool enabled = false; |
322 | 341 | ||
323 | spin_lock_irq(&dev->power.lock); | 342 | spin_lock_irq(&dev->power.lock); |
324 | if (dev->power.wakeup) { | 343 | if (dev->power.wakeup) { |
325 | count = dev->power.wakeup->hit_count; | 344 | count = dev->power.wakeup->expire_count; |
326 | enabled = true; | 345 | enabled = true; |
327 | } | 346 | } |
328 | spin_unlock_irq(&dev->power.lock); | 347 | spin_unlock_irq(&dev->power.lock); |
329 | return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); | 348 | return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); |
330 | } | 349 | } |
331 | 350 | ||
332 | static DEVICE_ATTR(wakeup_hit_count, 0444, wakeup_hit_count_show, NULL); | 351 | static DEVICE_ATTR(wakeup_expire_count, 0444, wakeup_expire_count_show, NULL); |
333 | 352 | ||
334 | static ssize_t wakeup_active_show(struct device *dev, | 353 | static ssize_t wakeup_active_show(struct device *dev, |
335 | struct device_attribute *attr, char *buf) | 354 | struct device_attribute *attr, char *buf) |
@@ -398,6 +417,27 @@ static ssize_t wakeup_last_time_show(struct device *dev, | |||
398 | } | 417 | } |
399 | 418 | ||
400 | static DEVICE_ATTR(wakeup_last_time_ms, 0444, wakeup_last_time_show, NULL); | 419 | static DEVICE_ATTR(wakeup_last_time_ms, 0444, wakeup_last_time_show, NULL); |
420 | |||
421 | #ifdef CONFIG_PM_AUTOSLEEP | ||
422 | static ssize_t wakeup_prevent_sleep_time_show(struct device *dev, | ||
423 | struct device_attribute *attr, | ||
424 | char *buf) | ||
425 | { | ||
426 | s64 msec = 0; | ||
427 | bool enabled = false; | ||
428 | |||
429 | spin_lock_irq(&dev->power.lock); | ||
430 | if (dev->power.wakeup) { | ||
431 | msec = ktime_to_ms(dev->power.wakeup->prevent_sleep_time); | ||
432 | enabled = true; | ||
433 | } | ||
434 | spin_unlock_irq(&dev->power.lock); | ||
435 | return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n"); | ||
436 | } | ||
437 | |||
438 | static DEVICE_ATTR(wakeup_prevent_sleep_time_ms, 0444, | ||
439 | wakeup_prevent_sleep_time_show, NULL); | ||
440 | #endif /* CONFIG_PM_AUTOSLEEP */ | ||
401 | #endif /* CONFIG_PM_SLEEP */ | 441 | #endif /* CONFIG_PM_SLEEP */ |
402 | 442 | ||
403 | #ifdef CONFIG_PM_ADVANCED_DEBUG | 443 | #ifdef CONFIG_PM_ADVANCED_DEBUG |
@@ -486,11 +526,15 @@ static struct attribute *wakeup_attrs[] = { | |||
486 | &dev_attr_wakeup.attr, | 526 | &dev_attr_wakeup.attr, |
487 | &dev_attr_wakeup_count.attr, | 527 | &dev_attr_wakeup_count.attr, |
488 | &dev_attr_wakeup_active_count.attr, | 528 | &dev_attr_wakeup_active_count.attr, |
489 | &dev_attr_wakeup_hit_count.attr, | 529 | &dev_attr_wakeup_abort_count.attr, |
530 | &dev_attr_wakeup_expire_count.attr, | ||
490 | &dev_attr_wakeup_active.attr, | 531 | &dev_attr_wakeup_active.attr, |
491 | &dev_attr_wakeup_total_time_ms.attr, | 532 | &dev_attr_wakeup_total_time_ms.attr, |
492 | &dev_attr_wakeup_max_time_ms.attr, | 533 | &dev_attr_wakeup_max_time_ms.attr, |
493 | &dev_attr_wakeup_last_time_ms.attr, | 534 | &dev_attr_wakeup_last_time_ms.attr, |
535 | #ifdef CONFIG_PM_AUTOSLEEP | ||
536 | &dev_attr_wakeup_prevent_sleep_time_ms.attr, | ||
537 | #endif | ||
494 | #endif | 538 | #endif |
495 | NULL, | 539 | NULL, |
496 | }; | 540 | }; |
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 2a3e581b8dcd..cbb463b3a750 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -14,16 +14,15 @@ | |||
14 | #include <linux/suspend.h> | 14 | #include <linux/suspend.h> |
15 | #include <linux/seq_file.h> | 15 | #include <linux/seq_file.h> |
16 | #include <linux/debugfs.h> | 16 | #include <linux/debugfs.h> |
17 | #include <trace/events/power.h> | ||
17 | 18 | ||
18 | #include "power.h" | 19 | #include "power.h" |
19 | 20 | ||
20 | #define TIMEOUT 100 | ||
21 | |||
22 | /* | 21 | /* |
23 | * If set, the suspend/hibernate code will abort transitions to a sleep state | 22 | * If set, the suspend/hibernate code will abort transitions to a sleep state |
24 | * if wakeup events are registered during or immediately before the transition. | 23 | * if wakeup events are registered during or immediately before the transition. |
25 | */ | 24 | */ |
26 | bool events_check_enabled; | 25 | bool events_check_enabled __read_mostly; |
27 | 26 | ||
28 | /* | 27 | /* |
29 | * Combined counters of registered wakeup events and wakeup events in progress. | 28 | * Combined counters of registered wakeup events and wakeup events in progress. |
@@ -52,6 +51,8 @@ static void pm_wakeup_timer_fn(unsigned long data); | |||
52 | 51 | ||
53 | static LIST_HEAD(wakeup_sources); | 52 | static LIST_HEAD(wakeup_sources); |
54 | 53 | ||
54 | static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue); | ||
55 | |||
55 | /** | 56 | /** |
56 | * wakeup_source_prepare - Prepare a new wakeup source for initialization. | 57 | * wakeup_source_prepare - Prepare a new wakeup source for initialization. |
57 | * @ws: Wakeup source to prepare. | 58 | * @ws: Wakeup source to prepare. |
@@ -132,6 +133,7 @@ void wakeup_source_add(struct wakeup_source *ws) | |||
132 | spin_lock_init(&ws->lock); | 133 | spin_lock_init(&ws->lock); |
133 | setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws); | 134 | setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws); |
134 | ws->active = false; | 135 | ws->active = false; |
136 | ws->last_time = ktime_get(); | ||
135 | 137 | ||
136 | spin_lock_irq(&events_lock); | 138 | spin_lock_irq(&events_lock); |
137 | list_add_rcu(&ws->entry, &wakeup_sources); | 139 | list_add_rcu(&ws->entry, &wakeup_sources); |
@@ -374,12 +376,33 @@ EXPORT_SYMBOL_GPL(device_set_wakeup_enable); | |||
374 | */ | 376 | */ |
375 | static void wakeup_source_activate(struct wakeup_source *ws) | 377 | static void wakeup_source_activate(struct wakeup_source *ws) |
376 | { | 378 | { |
379 | unsigned int cec; | ||
380 | |||
377 | ws->active = true; | 381 | ws->active = true; |
378 | ws->active_count++; | 382 | ws->active_count++; |
379 | ws->last_time = ktime_get(); | 383 | ws->last_time = ktime_get(); |
384 | if (ws->autosleep_enabled) | ||
385 | ws->start_prevent_time = ws->last_time; | ||
380 | 386 | ||
381 | /* Increment the counter of events in progress. */ | 387 | /* Increment the counter of events in progress. */ |
382 | atomic_inc(&combined_event_count); | 388 | cec = atomic_inc_return(&combined_event_count); |
389 | |||
390 | trace_wakeup_source_activate(ws->name, cec); | ||
391 | } | ||
392 | |||
393 | /** | ||
394 | * wakeup_source_report_event - Report wakeup event using the given source. | ||
395 | * @ws: Wakeup source to report the event for. | ||
396 | */ | ||
397 | static void wakeup_source_report_event(struct wakeup_source *ws) | ||
398 | { | ||
399 | ws->event_count++; | ||
400 | /* This is racy, but the counter is approximate anyway. */ | ||
401 | if (events_check_enabled) | ||
402 | ws->wakeup_count++; | ||
403 | |||
404 | if (!ws->active) | ||
405 | wakeup_source_activate(ws); | ||
383 | } | 406 | } |
384 | 407 | ||
385 | /** | 408 | /** |
@@ -397,10 +420,7 @@ void __pm_stay_awake(struct wakeup_source *ws) | |||
397 | 420 | ||
398 | spin_lock_irqsave(&ws->lock, flags); | 421 | spin_lock_irqsave(&ws->lock, flags); |
399 | 422 | ||
400 | ws->event_count++; | 423 | wakeup_source_report_event(ws); |
401 | if (!ws->active) | ||
402 | wakeup_source_activate(ws); | ||
403 | |||
404 | del_timer(&ws->timer); | 424 | del_timer(&ws->timer); |
405 | ws->timer_expires = 0; | 425 | ws->timer_expires = 0; |
406 | 426 | ||
@@ -432,6 +452,17 @@ void pm_stay_awake(struct device *dev) | |||
432 | } | 452 | } |
433 | EXPORT_SYMBOL_GPL(pm_stay_awake); | 453 | EXPORT_SYMBOL_GPL(pm_stay_awake); |
434 | 454 | ||
455 | #ifdef CONFIG_PM_AUTOSLEEP | ||
456 | static void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now) | ||
457 | { | ||
458 | ktime_t delta = ktime_sub(now, ws->start_prevent_time); | ||
459 | ws->prevent_sleep_time = ktime_add(ws->prevent_sleep_time, delta); | ||
460 | } | ||
461 | #else | ||
462 | static inline void update_prevent_sleep_time(struct wakeup_source *ws, | ||
463 | ktime_t now) {} | ||
464 | #endif | ||
465 | |||
435 | /** | 466 | /** |
436 | * wakup_source_deactivate - Mark given wakeup source as inactive. | 467 | * wakup_source_deactivate - Mark given wakeup source as inactive. |
437 | * @ws: Wakeup source to handle. | 468 | * @ws: Wakeup source to handle. |
@@ -442,6 +473,7 @@ EXPORT_SYMBOL_GPL(pm_stay_awake); | |||
442 | */ | 473 | */ |
443 | static void wakeup_source_deactivate(struct wakeup_source *ws) | 474 | static void wakeup_source_deactivate(struct wakeup_source *ws) |
444 | { | 475 | { |
476 | unsigned int cnt, inpr, cec; | ||
445 | ktime_t duration; | 477 | ktime_t duration; |
446 | ktime_t now; | 478 | ktime_t now; |
447 | 479 | ||
@@ -468,14 +500,23 @@ static void wakeup_source_deactivate(struct wakeup_source *ws) | |||
468 | if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time)) | 500 | if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time)) |
469 | ws->max_time = duration; | 501 | ws->max_time = duration; |
470 | 502 | ||
503 | ws->last_time = now; | ||
471 | del_timer(&ws->timer); | 504 | del_timer(&ws->timer); |
472 | ws->timer_expires = 0; | 505 | ws->timer_expires = 0; |
473 | 506 | ||
507 | if (ws->autosleep_enabled) | ||
508 | update_prevent_sleep_time(ws, now); | ||
509 | |||
474 | /* | 510 | /* |
475 | * Increment the counter of registered wakeup events and decrement the | 511 | * Increment the counter of registered wakeup events and decrement the |
476 | * couter of wakeup events in progress simultaneously. | 512 | * couter of wakeup events in progress simultaneously. |
477 | */ | 513 | */ |
478 | atomic_add(MAX_IN_PROGRESS, &combined_event_count); | 514 | cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count); |
515 | trace_wakeup_source_deactivate(ws->name, cec); | ||
516 | |||
517 | split_counters(&cnt, &inpr); | ||
518 | if (!inpr && waitqueue_active(&wakeup_count_wait_queue)) | ||
519 | wake_up(&wakeup_count_wait_queue); | ||
479 | } | 520 | } |
480 | 521 | ||
481 | /** | 522 | /** |
@@ -536,8 +577,10 @@ static void pm_wakeup_timer_fn(unsigned long data) | |||
536 | spin_lock_irqsave(&ws->lock, flags); | 577 | spin_lock_irqsave(&ws->lock, flags); |
537 | 578 | ||
538 | if (ws->active && ws->timer_expires | 579 | if (ws->active && ws->timer_expires |
539 | && time_after_eq(jiffies, ws->timer_expires)) | 580 | && time_after_eq(jiffies, ws->timer_expires)) { |
540 | wakeup_source_deactivate(ws); | 581 | wakeup_source_deactivate(ws); |
582 | ws->expire_count++; | ||
583 | } | ||
541 | 584 | ||
542 | spin_unlock_irqrestore(&ws->lock, flags); | 585 | spin_unlock_irqrestore(&ws->lock, flags); |
543 | } | 586 | } |
@@ -564,9 +607,7 @@ void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) | |||
564 | 607 | ||
565 | spin_lock_irqsave(&ws->lock, flags); | 608 | spin_lock_irqsave(&ws->lock, flags); |
566 | 609 | ||
567 | ws->event_count++; | 610 | wakeup_source_report_event(ws); |
568 | if (!ws->active) | ||
569 | wakeup_source_activate(ws); | ||
570 | 611 | ||
571 | if (!msec) { | 612 | if (!msec) { |
572 | wakeup_source_deactivate(ws); | 613 | wakeup_source_deactivate(ws); |
@@ -609,24 +650,6 @@ void pm_wakeup_event(struct device *dev, unsigned int msec) | |||
609 | EXPORT_SYMBOL_GPL(pm_wakeup_event); | 650 | EXPORT_SYMBOL_GPL(pm_wakeup_event); |
610 | 651 | ||
611 | /** | 652 | /** |
612 | * pm_wakeup_update_hit_counts - Update hit counts of all active wakeup sources. | ||
613 | */ | ||
614 | static void pm_wakeup_update_hit_counts(void) | ||
615 | { | ||
616 | unsigned long flags; | ||
617 | struct wakeup_source *ws; | ||
618 | |||
619 | rcu_read_lock(); | ||
620 | list_for_each_entry_rcu(ws, &wakeup_sources, entry) { | ||
621 | spin_lock_irqsave(&ws->lock, flags); | ||
622 | if (ws->active) | ||
623 | ws->hit_count++; | ||
624 | spin_unlock_irqrestore(&ws->lock, flags); | ||
625 | } | ||
626 | rcu_read_unlock(); | ||
627 | } | ||
628 | |||
629 | /** | ||
630 | * pm_wakeup_pending - Check if power transition in progress should be aborted. | 653 | * pm_wakeup_pending - Check if power transition in progress should be aborted. |
631 | * | 654 | * |
632 | * Compare the current number of registered wakeup events with its preserved | 655 | * Compare the current number of registered wakeup events with its preserved |
@@ -648,32 +671,38 @@ bool pm_wakeup_pending(void) | |||
648 | events_check_enabled = !ret; | 671 | events_check_enabled = !ret; |
649 | } | 672 | } |
650 | spin_unlock_irqrestore(&events_lock, flags); | 673 | spin_unlock_irqrestore(&events_lock, flags); |
651 | if (ret) | ||
652 | pm_wakeup_update_hit_counts(); | ||
653 | return ret; | 674 | return ret; |
654 | } | 675 | } |
655 | 676 | ||
656 | /** | 677 | /** |
657 | * pm_get_wakeup_count - Read the number of registered wakeup events. | 678 | * pm_get_wakeup_count - Read the number of registered wakeup events. |
658 | * @count: Address to store the value at. | 679 | * @count: Address to store the value at. |
680 | * @block: Whether or not to block. | ||
659 | * | 681 | * |
660 | * Store the number of registered wakeup events at the address in @count. Block | 682 | * Store the number of registered wakeup events at the address in @count. If |
661 | * if the current number of wakeup events being processed is nonzero. | 683 | * @block is set, block until the current number of wakeup events being |
684 | * processed is zero. | ||
662 | * | 685 | * |
663 | * Return 'false' if the wait for the number of wakeup events being processed to | 686 | * Return 'false' if the current number of wakeup events being processed is |
664 | * drop down to zero has been interrupted by a signal (and the current number | 687 | * nonzero. Otherwise return 'true'. |
665 | * of wakeup events being processed is still nonzero). Otherwise return 'true'. | ||
666 | */ | 688 | */ |
667 | bool pm_get_wakeup_count(unsigned int *count) | 689 | bool pm_get_wakeup_count(unsigned int *count, bool block) |
668 | { | 690 | { |
669 | unsigned int cnt, inpr; | 691 | unsigned int cnt, inpr; |
670 | 692 | ||
671 | for (;;) { | 693 | if (block) { |
672 | split_counters(&cnt, &inpr); | 694 | DEFINE_WAIT(wait); |
673 | if (inpr == 0 || signal_pending(current)) | 695 | |
674 | break; | 696 | for (;;) { |
675 | pm_wakeup_update_hit_counts(); | 697 | prepare_to_wait(&wakeup_count_wait_queue, &wait, |
676 | schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT)); | 698 | TASK_INTERRUPTIBLE); |
699 | split_counters(&cnt, &inpr); | ||
700 | if (inpr == 0 || signal_pending(current)) | ||
701 | break; | ||
702 | |||
703 | schedule(); | ||
704 | } | ||
705 | finish_wait(&wakeup_count_wait_queue, &wait); | ||
677 | } | 706 | } |
678 | 707 | ||
679 | split_counters(&cnt, &inpr); | 708 | split_counters(&cnt, &inpr); |
@@ -703,11 +732,37 @@ bool pm_save_wakeup_count(unsigned int count) | |||
703 | events_check_enabled = true; | 732 | events_check_enabled = true; |
704 | } | 733 | } |
705 | spin_unlock_irq(&events_lock); | 734 | spin_unlock_irq(&events_lock); |
706 | if (!events_check_enabled) | ||
707 | pm_wakeup_update_hit_counts(); | ||
708 | return events_check_enabled; | 735 | return events_check_enabled; |
709 | } | 736 | } |
710 | 737 | ||
738 | #ifdef CONFIG_PM_AUTOSLEEP | ||
739 | /** | ||
740 | * pm_wakep_autosleep_enabled - Modify autosleep_enabled for all wakeup sources. | ||
741 | * @enabled: Whether to set or to clear the autosleep_enabled flags. | ||
742 | */ | ||
743 | void pm_wakep_autosleep_enabled(bool set) | ||
744 | { | ||
745 | struct wakeup_source *ws; | ||
746 | ktime_t now = ktime_get(); | ||
747 | |||
748 | rcu_read_lock(); | ||
749 | list_for_each_entry_rcu(ws, &wakeup_sources, entry) { | ||
750 | spin_lock_irq(&ws->lock); | ||
751 | if (ws->autosleep_enabled != set) { | ||
752 | ws->autosleep_enabled = set; | ||
753 | if (ws->active) { | ||
754 | if (set) | ||
755 | ws->start_prevent_time = now; | ||
756 | else | ||
757 | update_prevent_sleep_time(ws, now); | ||
758 | } | ||
759 | } | ||
760 | spin_unlock_irq(&ws->lock); | ||
761 | } | ||
762 | rcu_read_unlock(); | ||
763 | } | ||
764 | #endif /* CONFIG_PM_AUTOSLEEP */ | ||
765 | |||
711 | static struct dentry *wakeup_sources_stats_dentry; | 766 | static struct dentry *wakeup_sources_stats_dentry; |
712 | 767 | ||
713 | /** | 768 | /** |
@@ -723,27 +778,37 @@ static int print_wakeup_source_stats(struct seq_file *m, | |||
723 | ktime_t max_time; | 778 | ktime_t max_time; |
724 | unsigned long active_count; | 779 | unsigned long active_count; |
725 | ktime_t active_time; | 780 | ktime_t active_time; |
781 | ktime_t prevent_sleep_time; | ||
726 | int ret; | 782 | int ret; |
727 | 783 | ||
728 | spin_lock_irqsave(&ws->lock, flags); | 784 | spin_lock_irqsave(&ws->lock, flags); |
729 | 785 | ||
730 | total_time = ws->total_time; | 786 | total_time = ws->total_time; |
731 | max_time = ws->max_time; | 787 | max_time = ws->max_time; |
788 | prevent_sleep_time = ws->prevent_sleep_time; | ||
732 | active_count = ws->active_count; | 789 | active_count = ws->active_count; |
733 | if (ws->active) { | 790 | if (ws->active) { |
734 | active_time = ktime_sub(ktime_get(), ws->last_time); | 791 | ktime_t now = ktime_get(); |
792 | |||
793 | active_time = ktime_sub(now, ws->last_time); | ||
735 | total_time = ktime_add(total_time, active_time); | 794 | total_time = ktime_add(total_time, active_time); |
736 | if (active_time.tv64 > max_time.tv64) | 795 | if (active_time.tv64 > max_time.tv64) |
737 | max_time = active_time; | 796 | max_time = active_time; |
797 | |||
798 | if (ws->autosleep_enabled) | ||
799 | prevent_sleep_time = ktime_add(prevent_sleep_time, | ||
800 | ktime_sub(now, ws->start_prevent_time)); | ||
738 | } else { | 801 | } else { |
739 | active_time = ktime_set(0, 0); | 802 | active_time = ktime_set(0, 0); |
740 | } | 803 | } |
741 | 804 | ||
742 | ret = seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t" | 805 | ret = seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t" |
743 | "%lld\t\t%lld\t\t%lld\t\t%lld\n", | 806 | "%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n", |
744 | ws->name, active_count, ws->event_count, ws->hit_count, | 807 | ws->name, active_count, ws->event_count, |
808 | ws->wakeup_count, ws->expire_count, | ||
745 | ktime_to_ms(active_time), ktime_to_ms(total_time), | 809 | ktime_to_ms(active_time), ktime_to_ms(total_time), |
746 | ktime_to_ms(max_time), ktime_to_ms(ws->last_time)); | 810 | ktime_to_ms(max_time), ktime_to_ms(ws->last_time), |
811 | ktime_to_ms(prevent_sleep_time)); | ||
747 | 812 | ||
748 | spin_unlock_irqrestore(&ws->lock, flags); | 813 | spin_unlock_irqrestore(&ws->lock, flags); |
749 | 814 | ||
@@ -758,8 +823,9 @@ static int wakeup_sources_stats_show(struct seq_file *m, void *unused) | |||
758 | { | 823 | { |
759 | struct wakeup_source *ws; | 824 | struct wakeup_source *ws; |
760 | 825 | ||
761 | seq_puts(m, "name\t\tactive_count\tevent_count\thit_count\t" | 826 | seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t" |
762 | "active_since\ttotal_time\tmax_time\tlast_change\n"); | 827 | "expire_count\tactive_since\ttotal_time\tmax_time\t" |
828 | "last_change\tprevent_suspend_time\n"); | ||
763 | 829 | ||
764 | rcu_read_lock(); | 830 | rcu_read_lock(); |
765 | list_for_each_entry_rcu(ws, &wakeup_sources, entry) | 831 | list_for_each_entry_rcu(ws, &wakeup_sources, entry) |
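
For reference, the statistics, tracepoints and "no events in progress" wait queue reworked above are all driven by the existing wakeup-source calls, so drivers need no changes to benefit from them. A hedged in-kernel sketch follows; the device and interrupt handler are hypothetical, and only the call into the wakeup framework corresponds to code touched by this patch:

/* Sketch of a driver feeding the wakeup-source machinery shown above.
 * Assumes device_init_wakeup(dev, true) was called at probe time so that
 * dev->power.wakeup exists; "my_dev" and the IRQ handler are hypothetical.
 */
#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

static irqreturn_t my_dev_wake_irq(int irq, void *data)
{
	struct device *dev = data;

	/*
	 * Report a wakeup event with a 100 ms timeout: this bumps
	 * event_count (and wakeup_count while events_check_enabled is set),
	 * fires the new wakeup_source_activate tracepoint and, if the
	 * timeout expires before __pm_relax(), increments expire_count.
	 */
	pm_wakeup_event(dev, 100);
	return IRQ_HANDLED;
}
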
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index c0b3c70ee87a..2cf0f2153be5 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/bitops.h> | 33 | #include <linux/bitops.h> |
34 | #include <linux/mutex.h> | 34 | #include <linux/mutex.h> |
35 | #include <linux/anon_inodes.h> | 35 | #include <linux/anon_inodes.h> |
36 | #include <linux/device.h> | ||
36 | #include <asm/uaccess.h> | 37 | #include <asm/uaccess.h> |
37 | #include <asm/io.h> | 38 | #include <asm/io.h> |
38 | #include <asm/mman.h> | 39 | #include <asm/mman.h> |
@@ -87,7 +88,7 @@ | |||
87 | */ | 88 | */ |
88 | 89 | ||
89 | /* Epoll private bits inside the event mask */ | 90 | /* Epoll private bits inside the event mask */ |
90 | #define EP_PRIVATE_BITS (EPOLLONESHOT | EPOLLET) | 91 | #define EP_PRIVATE_BITS (EPOLLWAKEUP | EPOLLONESHOT | EPOLLET) |
91 | 92 | ||
92 | /* Maximum number of nesting allowed inside epoll sets */ | 93 | /* Maximum number of nesting allowed inside epoll sets */ |
93 | #define EP_MAX_NESTS 4 | 94 | #define EP_MAX_NESTS 4 |
@@ -154,6 +155,9 @@ struct epitem { | |||
154 | /* List header used to link this item to the "struct file" items list */ | 155 | /* List header used to link this item to the "struct file" items list */ |
155 | struct list_head fllink; | 156 | struct list_head fllink; |
156 | 157 | ||
158 | /* wakeup_source used when EPOLLWAKEUP is set */ | ||
159 | struct wakeup_source *ws; | ||
160 | |||
157 | /* The structure that describe the interested events and the source fd */ | 161 | /* The structure that describe the interested events and the source fd */ |
158 | struct epoll_event event; | 162 | struct epoll_event event; |
159 | }; | 163 | }; |
@@ -194,6 +198,9 @@ struct eventpoll { | |||
194 | */ | 198 | */ |
195 | struct epitem *ovflist; | 199 | struct epitem *ovflist; |
196 | 200 | ||
201 | /* wakeup_source used when ep_scan_ready_list is running */ | ||
202 | struct wakeup_source *ws; | ||
203 | |||
197 | /* The user that created the eventpoll descriptor */ | 204 | /* The user that created the eventpoll descriptor */ |
198 | struct user_struct *user; | 205 | struct user_struct *user; |
199 | 206 | ||
@@ -588,8 +595,10 @@ static int ep_scan_ready_list(struct eventpoll *ep, | |||
588 | * queued into ->ovflist but the "txlist" might already | 595 | * queued into ->ovflist but the "txlist" might already |
589 | * contain them, and the list_splice() below takes care of them. | 596 | * contain them, and the list_splice() below takes care of them. |
590 | */ | 597 | */ |
591 | if (!ep_is_linked(&epi->rdllink)) | 598 | if (!ep_is_linked(&epi->rdllink)) { |
592 | list_add_tail(&epi->rdllink, &ep->rdllist); | 599 | list_add_tail(&epi->rdllink, &ep->rdllist); |
600 | __pm_stay_awake(epi->ws); | ||
601 | } | ||
593 | } | 602 | } |
594 | /* | 603 | /* |
595 | * We need to set back ep->ovflist to EP_UNACTIVE_PTR, so that after | 604 | * We need to set back ep->ovflist to EP_UNACTIVE_PTR, so that after |
@@ -602,6 +611,7 @@ static int ep_scan_ready_list(struct eventpoll *ep, | |||
602 | * Quickly re-inject items left on "txlist". | 611 | * Quickly re-inject items left on "txlist". |
603 | */ | 612 | */ |
604 | list_splice(&txlist, &ep->rdllist); | 613 | list_splice(&txlist, &ep->rdllist); |
614 | __pm_relax(ep->ws); | ||
605 | 615 | ||
606 | if (!list_empty(&ep->rdllist)) { | 616 | if (!list_empty(&ep->rdllist)) { |
607 | /* | 617 | /* |
@@ -656,6 +666,8 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi) | |||
656 | list_del_init(&epi->rdllink); | 666 | list_del_init(&epi->rdllink); |
657 | spin_unlock_irqrestore(&ep->lock, flags); | 667 | spin_unlock_irqrestore(&ep->lock, flags); |
658 | 668 | ||
669 | wakeup_source_unregister(epi->ws); | ||
670 | |||
659 | /* At this point it is safe to free the eventpoll item */ | 671 | /* At this point it is safe to free the eventpoll item */ |
660 | kmem_cache_free(epi_cache, epi); | 672 | kmem_cache_free(epi_cache, epi); |
661 | 673 | ||
@@ -706,6 +718,7 @@ static void ep_free(struct eventpoll *ep) | |||
706 | mutex_unlock(&epmutex); | 718 | mutex_unlock(&epmutex); |
707 | mutex_destroy(&ep->mtx); | 719 | mutex_destroy(&ep->mtx); |
708 | free_uid(ep->user); | 720 | free_uid(ep->user); |
721 | wakeup_source_unregister(ep->ws); | ||
709 | kfree(ep); | 722 | kfree(ep); |
710 | } | 723 | } |
711 | 724 | ||
@@ -737,6 +750,7 @@ static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head, | |||
737 | * callback, but it's not actually ready, as far as | 750 | * callback, but it's not actually ready, as far as |
738 | * caller requested events goes. We can remove it here. | 751 | * caller requested events goes. We can remove it here. |
739 | */ | 752 | */ |
753 | __pm_relax(epi->ws); | ||
740 | list_del_init(&epi->rdllink); | 754 | list_del_init(&epi->rdllink); |
741 | } | 755 | } |
742 | } | 756 | } |
@@ -927,13 +941,23 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k | |||
927 | if (epi->next == EP_UNACTIVE_PTR) { | 941 | if (epi->next == EP_UNACTIVE_PTR) { |
928 | epi->next = ep->ovflist; | 942 | epi->next = ep->ovflist; |
929 | ep->ovflist = epi; | 943 | ep->ovflist = epi; |
944 | if (epi->ws) { | ||
945 | /* | ||
946 | * Activate ep->ws since epi->ws may get | ||
947 | * deactivated at any time. | ||
948 | */ | ||
949 | __pm_stay_awake(ep->ws); | ||
950 | } | ||
951 | |||
930 | } | 952 | } |
931 | goto out_unlock; | 953 | goto out_unlock; |
932 | } | 954 | } |
933 | 955 | ||
934 | /* If this file is already in the ready list we exit soon */ | 956 | /* If this file is already in the ready list we exit soon */ |
935 | if (!ep_is_linked(&epi->rdllink)) | 957 | if (!ep_is_linked(&epi->rdllink)) { |
936 | list_add_tail(&epi->rdllink, &ep->rdllist); | 958 | list_add_tail(&epi->rdllink, &ep->rdllist); |
959 | __pm_stay_awake(epi->ws); | ||
960 | } | ||
937 | 961 | ||
938 | /* | 962 | /* |
939 | * Wake up ( if active ) both the eventpoll wait list and the ->poll() | 963 | * Wake up ( if active ) both the eventpoll wait list and the ->poll() |
@@ -1091,6 +1115,30 @@ static int reverse_path_check(void) | |||
1091 | return error; | 1115 | return error; |
1092 | } | 1116 | } |
1093 | 1117 | ||
1118 | static int ep_create_wakeup_source(struct epitem *epi) | ||
1119 | { | ||
1120 | const char *name; | ||
1121 | |||
1122 | if (!epi->ep->ws) { | ||
1123 | epi->ep->ws = wakeup_source_register("eventpoll"); | ||
1124 | if (!epi->ep->ws) | ||
1125 | return -ENOMEM; | ||
1126 | } | ||
1127 | |||
1128 | name = epi->ffd.file->f_path.dentry->d_name.name; | ||
1129 | epi->ws = wakeup_source_register(name); | ||
1130 | if (!epi->ws) | ||
1131 | return -ENOMEM; | ||
1132 | |||
1133 | return 0; | ||
1134 | } | ||
1135 | |||
1136 | static void ep_destroy_wakeup_source(struct epitem *epi) | ||
1137 | { | ||
1138 | wakeup_source_unregister(epi->ws); | ||
1139 | epi->ws = NULL; | ||
1140 | } | ||
1141 | |||
1094 | /* | 1142 | /* |
1095 | * Must be called with "mtx" held. | 1143 | * Must be called with "mtx" held. |
1096 | */ | 1144 | */ |
@@ -1118,6 +1166,13 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event, | |||
1118 | epi->event = *event; | 1166 | epi->event = *event; |
1119 | epi->nwait = 0; | 1167 | epi->nwait = 0; |
1120 | epi->next = EP_UNACTIVE_PTR; | 1168 | epi->next = EP_UNACTIVE_PTR; |
1169 | if (epi->event.events & EPOLLWAKEUP) { | ||
1170 | error = ep_create_wakeup_source(epi); | ||
1171 | if (error) | ||
1172 | goto error_create_wakeup_source; | ||
1173 | } else { | ||
1174 | epi->ws = NULL; | ||
1175 | } | ||
1121 | 1176 | ||
1122 | /* Initialize the poll table using the queue callback */ | 1177 | /* Initialize the poll table using the queue callback */ |
1123 | epq.epi = epi; | 1178 | epq.epi = epi; |
@@ -1164,6 +1219,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event, | |||
1164 | /* If the file is already "ready" we drop it inside the ready list */ | 1219 | /* If the file is already "ready" we drop it inside the ready list */ |
1165 | if ((revents & event->events) && !ep_is_linked(&epi->rdllink)) { | 1220 | if ((revents & event->events) && !ep_is_linked(&epi->rdllink)) { |
1166 | list_add_tail(&epi->rdllink, &ep->rdllist); | 1221 | list_add_tail(&epi->rdllink, &ep->rdllist); |
1222 | __pm_stay_awake(epi->ws); | ||
1167 | 1223 | ||
1168 | /* Notify waiting tasks that events are available */ | 1224 | /* Notify waiting tasks that events are available */ |
1169 | if (waitqueue_active(&ep->wq)) | 1225 | if (waitqueue_active(&ep->wq)) |
@@ -1204,6 +1260,9 @@ error_unregister: | |||
1204 | list_del_init(&epi->rdllink); | 1260 | list_del_init(&epi->rdllink); |
1205 | spin_unlock_irqrestore(&ep->lock, flags); | 1261 | spin_unlock_irqrestore(&ep->lock, flags); |
1206 | 1262 | ||
1263 | wakeup_source_unregister(epi->ws); | ||
1264 | |||
1265 | error_create_wakeup_source: | ||
1207 | kmem_cache_free(epi_cache, epi); | 1266 | kmem_cache_free(epi_cache, epi); |
1208 | 1267 | ||
1209 | return error; | 1268 | return error; |
@@ -1229,6 +1288,12 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even | |||
1229 | epi->event.events = event->events; | 1288 | epi->event.events = event->events; |
1230 | pt._key = event->events; | 1289 | pt._key = event->events; |
1231 | epi->event.data = event->data; /* protected by mtx */ | 1290 | epi->event.data = event->data; /* protected by mtx */ |
1291 | if (epi->event.events & EPOLLWAKEUP) { | ||
1292 | if (!epi->ws) | ||
1293 | ep_create_wakeup_source(epi); | ||
1294 | } else if (epi->ws) { | ||
1295 | ep_destroy_wakeup_source(epi); | ||
1296 | } | ||
1232 | 1297 | ||
1233 | /* | 1298 | /* |
1234 | * Get current event bits. We can safely use the file* here because | 1299 | * Get current event bits. We can safely use the file* here because |
@@ -1244,6 +1309,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even | |||
1244 | spin_lock_irq(&ep->lock); | 1309 | spin_lock_irq(&ep->lock); |
1245 | if (!ep_is_linked(&epi->rdllink)) { | 1310 | if (!ep_is_linked(&epi->rdllink)) { |
1246 | list_add_tail(&epi->rdllink, &ep->rdllist); | 1311 | list_add_tail(&epi->rdllink, &ep->rdllist); |
1312 | __pm_stay_awake(epi->ws); | ||
1247 | 1313 | ||
1248 | /* Notify waiting tasks that events are available */ | 1314 | /* Notify waiting tasks that events are available */ |
1249 | if (waitqueue_active(&ep->wq)) | 1315 | if (waitqueue_active(&ep->wq)) |
@@ -1282,6 +1348,18 @@ static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head, | |||
1282 | !list_empty(head) && eventcnt < esed->maxevents;) { | 1348 | !list_empty(head) && eventcnt < esed->maxevents;) { |
1283 | epi = list_first_entry(head, struct epitem, rdllink); | 1349 | epi = list_first_entry(head, struct epitem, rdllink); |
1284 | 1350 | ||
1351 | /* | ||
1352 | * Activate ep->ws before deactivating epi->ws to prevent | ||
1353 | * triggering auto-suspend here (in case we reactive epi->ws | ||
1354 | * below). | ||
1355 | * | ||
1356 | * This could be rearranged to delay the deactivation of epi->ws | ||
1357 | * instead, but then epi->ws would temporarily be out of sync | ||
1358 | * with ep_is_linked(). | ||
1359 | */ | ||
1360 | if (epi->ws && epi->ws->active) | ||
1361 | __pm_stay_awake(ep->ws); | ||
1362 | __pm_relax(epi->ws); | ||
1285 | list_del_init(&epi->rdllink); | 1363 | list_del_init(&epi->rdllink); |
1286 | 1364 | ||
1287 | pt._key = epi->event.events; | 1365 | pt._key = epi->event.events; |
@@ -1298,6 +1376,7 @@ static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head, | |||
1298 | if (__put_user(revents, &uevent->events) || | 1376 | if (__put_user(revents, &uevent->events) || |
1299 | __put_user(epi->event.data, &uevent->data)) { | 1377 | __put_user(epi->event.data, &uevent->data)) { |
1300 | list_add(&epi->rdllink, head); | 1378 | list_add(&epi->rdllink, head); |
1379 | __pm_stay_awake(epi->ws); | ||
1301 | return eventcnt ? eventcnt : -EFAULT; | 1380 | return eventcnt ? eventcnt : -EFAULT; |
1302 | } | 1381 | } |
1303 | eventcnt++; | 1382 | eventcnt++; |
@@ -1317,6 +1396,7 @@ static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head, | |||
1317 | * poll callback will queue them in ep->ovflist. | 1396 | * poll callback will queue them in ep->ovflist. |
1318 | */ | 1397 | */ |
1319 | list_add_tail(&epi->rdllink, &ep->rdllist); | 1398 | list_add_tail(&epi->rdllink, &ep->rdllist); |
1399 | __pm_stay_awake(epi->ws); | ||
1320 | } | 1400 | } |
1321 | } | 1401 | } |
1322 | } | 1402 | } |
@@ -1629,6 +1709,10 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, | |||
1629 | if (!tfile->f_op || !tfile->f_op->poll) | 1709 | if (!tfile->f_op || !tfile->f_op->poll) |
1630 | goto error_tgt_fput; | 1710 | goto error_tgt_fput; |
1631 | 1711 | ||
1712 | /* Check if EPOLLWAKEUP is allowed */ | ||
1713 | if ((epds.events & EPOLLWAKEUP) && !capable(CAP_EPOLLWAKEUP)) | ||
1714 | goto error_tgt_fput; | ||
1715 | |||
1632 | /* | 1716 | /* |
1633 | * We have to check that the file structure underneath the file descriptor | 1717 | * We have to check that the file structure underneath the file descriptor |
1634 | * the user passed to us _is_ an eventpoll file. And also we do not permit | 1718 | * the user passed to us _is_ an eventpoll file. And also we do not permit |
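
The EPOLLWAKEUP handling added to fs/eventpoll.c above is driven entirely by the new flag in epoll_ctl(). A user-space sketch follows; EPOLLWAKEUP may be missing from older uapi headers, hence the fallback define, and the capability check above rejects the flag (EPERM in this version of the code) for callers without CAP_EPOLLWAKEUP:

/* Sketch: arm an fd with EPOLLWAKEUP so the system cannot autosleep while
 * events on it are ready but not yet consumed by epoll_wait().
 */
#include <stdio.h>
#include <sys/epoll.h>

#ifndef EPOLLWAKEUP
#define EPOLLWAKEUP (1u << 29)
#endif

int add_wakeup_fd(int epfd, int fd)
{
	struct epoll_event ev = {
		.events = EPOLLIN | EPOLLWAKEUP,
		.data.fd = fd,
	};

	if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev) < 0) {
		perror("epoll_ctl(EPOLLWAKEUP)");
		return -1;	/* e.g. caller lacks CAP_EPOLLWAKEUP */
	}
	return 0;
}
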
diff --git a/include/linux/capability.h b/include/linux/capability.h
index 12d52dedb229..c398cff3dab7 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -360,8 +360,11 @@ struct cpu_vfs_cap_data { | |||
360 | 360 | ||
361 | #define CAP_WAKE_ALARM 35 | 361 | #define CAP_WAKE_ALARM 35 |
362 | 362 | ||
363 | /* Allow preventing system suspends while epoll events are pending */ | ||
363 | 364 | ||
364 | #define CAP_LAST_CAP CAP_WAKE_ALARM | 365 | #define CAP_EPOLLWAKEUP 36 |
366 | |||
367 | #define CAP_LAST_CAP CAP_EPOLLWAKEUP | ||
365 | 368 | ||
366 | #define cap_valid(x) ((x) >= 0 && (x) <= CAP_LAST_CAP) | 369 | #define cap_valid(x) ((x) >= 0 && (x) <= CAP_LAST_CAP) |
367 | 370 | ||
diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h
index 657ab55beda0..6f8be328770a 100644
--- a/include/linux/eventpoll.h
+++ b/include/linux/eventpoll.h
@@ -26,6 +26,18 @@ | |||
26 | #define EPOLL_CTL_DEL 2 | 26 | #define EPOLL_CTL_DEL 2 |
27 | #define EPOLL_CTL_MOD 3 | 27 | #define EPOLL_CTL_MOD 3 |
28 | 28 | ||
29 | /* | ||
30 | * Request the handling of system wakeup events so as to prevent system suspends | ||
31 | * from happening while those events are being processed. | ||
32 | * | ||
33 | * Assuming neither EPOLLET nor EPOLLONESHOT is set, system suspends will not be | ||
34 | * re-allowed until epoll_wait is called again after consuming the wakeup | ||
35 | * event(s). | ||
36 | * | ||
37 | * Requires CAP_EPOLLWAKEUP | ||
38 | */ | ||
39 | #define EPOLLWAKEUP (1 << 29) | ||
40 | |||
29 | /* Set the One Shot behaviour for the target file descriptor */ | 41 | /* Set the One Shot behaviour for the target file descriptor */ |
30 | #define EPOLLONESHOT (1 << 30) | 42 | #define EPOLLONESHOT (1 << 30) |
31 | 43 | ||
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
index d9f05113e5fb..569781faa504 100644
--- a/include/linux/pm_wakeup.h
+++ b/include/linux/pm_wakeup.h
@@ -33,12 +33,15 @@ | |||
33 | * | 33 | * |
34 | * @total_time: Total time this wakeup source has been active. | 34 | * @total_time: Total time this wakeup source has been active. |
35 | * @max_time: Maximum time this wakeup source has been continuously active. | 35 | * @max_time: Maximum time this wakeup source has been continuously active. |
36 | * @last_time: Monotonic clock when the wakeup source's was activated last time. | 36 | * @last_time: Monotonic clock when the wakeup source's was touched last time. |
37 | * @prevent_sleep_time: Total time this source has been preventing autosleep. | ||
37 | * @event_count: Number of signaled wakeup events. | 38 | * @event_count: Number of signaled wakeup events. |
38 | * @active_count: Number of times the wakeup sorce was activated. | 39 | * @active_count: Number of times the wakeup sorce was activated. |
39 | * @relax_count: Number of times the wakeup sorce was deactivated. | 40 | * @relax_count: Number of times the wakeup sorce was deactivated. |
40 | * @hit_count: Number of times the wakeup sorce might abort system suspend. | 41 | * @expire_count: Number of times the wakeup source's timeout has expired. |
42 | * @wakeup_count: Number of times the wakeup source might abort suspend. | ||
41 | * @active: Status of the wakeup source. | 43 | * @active: Status of the wakeup source. |
44 | * @has_timeout: The wakeup source has been activated with a timeout. | ||
42 | */ | 45 | */ |
43 | struct wakeup_source { | 46 | struct wakeup_source { |
44 | const char *name; | 47 | const char *name; |
@@ -49,11 +52,15 @@ struct wakeup_source { | |||
49 | ktime_t total_time; | 52 | ktime_t total_time; |
50 | ktime_t max_time; | 53 | ktime_t max_time; |
51 | ktime_t last_time; | 54 | ktime_t last_time; |
55 | ktime_t start_prevent_time; | ||
56 | ktime_t prevent_sleep_time; | ||
52 | unsigned long event_count; | 57 | unsigned long event_count; |
53 | unsigned long active_count; | 58 | unsigned long active_count; |
54 | unsigned long relax_count; | 59 | unsigned long relax_count; |
55 | unsigned long hit_count; | 60 | unsigned long expire_count; |
56 | unsigned int active:1; | 61 | unsigned long wakeup_count; |
62 | bool active:1; | ||
63 | bool autosleep_enabled:1; | ||
57 | }; | 64 | }; |
58 | 65 | ||
59 | #ifdef CONFIG_PM_SLEEP | 66 | #ifdef CONFIG_PM_SLEEP |
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index ac1c114c499d..cd83059fb592 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -356,8 +356,9 @@ extern int unregister_pm_notifier(struct notifier_block *nb); | |||
356 | extern bool events_check_enabled; | 356 | extern bool events_check_enabled; |
357 | 357 | ||
358 | extern bool pm_wakeup_pending(void); | 358 | extern bool pm_wakeup_pending(void); |
359 | extern bool pm_get_wakeup_count(unsigned int *count); | 359 | extern bool pm_get_wakeup_count(unsigned int *count, bool block); |
360 | extern bool pm_save_wakeup_count(unsigned int count); | 360 | extern bool pm_save_wakeup_count(unsigned int count); |
361 | extern void pm_wakep_autosleep_enabled(bool set); | ||
361 | 362 | ||
362 | static inline void lock_system_sleep(void) | 363 | static inline void lock_system_sleep(void) |
363 | { | 364 | { |
@@ -407,6 +408,17 @@ static inline void unlock_system_sleep(void) {} | |||
407 | 408 | ||
408 | #endif /* !CONFIG_PM_SLEEP */ | 409 | #endif /* !CONFIG_PM_SLEEP */ |
409 | 410 | ||
411 | #ifdef CONFIG_PM_AUTOSLEEP | ||
412 | |||
413 | /* kernel/power/autosleep.c */ | ||
414 | void queue_up_suspend_work(void); | ||
415 | |||
416 | #else /* !CONFIG_PM_AUTOSLEEP */ | ||
417 | |||
418 | static inline void queue_up_suspend_work(void) {} | ||
419 | |||
420 | #endif /* !CONFIG_PM_AUTOSLEEP */ | ||
421 | |||
410 | #ifdef CONFIG_ARCH_SAVE_PAGE_KEYS | 422 | #ifdef CONFIG_ARCH_SAVE_PAGE_KEYS |
411 | /* | 423 | /* |
412 | * The ARCH_SAVE_PAGE_KEYS functions can be used by an architecture | 424 | * The ARCH_SAVE_PAGE_KEYS functions can be used by an architecture |
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index cae9a94f025d..0c9783841a30 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -65,6 +65,40 @@ TRACE_EVENT(machine_suspend, | |||
65 | TP_printk("state=%lu", (unsigned long)__entry->state) | 65 | TP_printk("state=%lu", (unsigned long)__entry->state) |
66 | ); | 66 | ); |
67 | 67 | ||
68 | DECLARE_EVENT_CLASS(wakeup_source, | ||
69 | |||
70 | TP_PROTO(const char *name, unsigned int state), | ||
71 | |||
72 | TP_ARGS(name, state), | ||
73 | |||
74 | TP_STRUCT__entry( | ||
75 | __string( name, name ) | ||
76 | __field( u64, state ) | ||
77 | ), | ||
78 | |||
79 | TP_fast_assign( | ||
80 | __assign_str(name, name); | ||
81 | __entry->state = state; | ||
82 | ), | ||
83 | |||
84 | TP_printk("%s state=0x%lx", __get_str(name), | ||
85 | (unsigned long)__entry->state) | ||
86 | ); | ||
87 | |||
88 | DEFINE_EVENT(wakeup_source, wakeup_source_activate, | ||
89 | |||
90 | TP_PROTO(const char *name, unsigned int state), | ||
91 | |||
92 | TP_ARGS(name, state) | ||
93 | ); | ||
94 | |||
95 | DEFINE_EVENT(wakeup_source, wakeup_source_deactivate, | ||
96 | |||
97 | TP_PROTO(const char *name, unsigned int state), | ||
98 | |||
99 | TP_ARGS(name, state) | ||
100 | ); | ||
101 | |||
68 | #ifdef CONFIG_EVENT_POWER_TRACING_DEPRECATED | 102 | #ifdef CONFIG_EVENT_POWER_TRACING_DEPRECATED |
69 | 103 | ||
70 | /* | 104 | /* |
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index deb5461e3216..8f9b4eb974e0 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -103,6 +103,33 @@ config PM_SLEEP_SMP | |||
103 | select HOTPLUG | 103 | select HOTPLUG |
104 | select HOTPLUG_CPU | 104 | select HOTPLUG_CPU |
105 | 105 | ||
106 | config PM_AUTOSLEEP | ||
107 | bool "Opportunistic sleep" | ||
108 | depends on PM_SLEEP | ||
109 | default n | ||
110 | ---help--- | ||
111 | Allow the kernel to trigger a system transition into a global sleep | ||
112 | state automatically whenever there are no active wakeup sources. | ||
113 | |||
114 | config PM_WAKELOCKS | ||
115 | bool "User space wakeup sources interface" | ||
116 | depends on PM_SLEEP | ||
117 | default n | ||
118 | ---help--- | ||
119 | Allow user space to create, activate and deactivate wakeup source | ||
120 | objects with the help of a sysfs-based interface. | ||
121 | |||
122 | config PM_WAKELOCKS_LIMIT | ||
123 | int "Maximum number of user space wakeup sources (0 = no limit)" | ||
124 | range 0 100000 | ||
125 | default 100 | ||
126 | depends on PM_WAKELOCKS | ||
127 | |||
128 | config PM_WAKELOCKS_GC | ||
129 | bool "Garbage collector for user space wakeup sources" | ||
130 | depends on PM_WAKELOCKS | ||
131 | default y | ||
132 | |||
106 | config PM_RUNTIME | 133 | config PM_RUNTIME |
107 | bool "Run-time PM core functionality" | 134 | bool "Run-time PM core functionality" |
108 | depends on !IA64_HP_SIM | 135 | depends on !IA64_HP_SIM |
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index 66d808ec5252..29472bff11ef 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -9,5 +9,7 @@ obj-$(CONFIG_SUSPEND) += suspend.o | |||
9 | obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o | 9 | obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o |
10 | obj-$(CONFIG_HIBERNATION) += hibernate.o snapshot.o swap.o user.o \ | 10 | obj-$(CONFIG_HIBERNATION) += hibernate.o snapshot.o swap.o user.o \ |
11 | block_io.o | 11 | block_io.o |
12 | obj-$(CONFIG_PM_AUTOSLEEP) += autosleep.o | ||
13 | obj-$(CONFIG_PM_WAKELOCKS) += wakelock.o | ||
12 | 14 | ||
13 | obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o | 15 | obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o |
diff --git a/kernel/power/autosleep.c b/kernel/power/autosleep.c
new file mode 100644
index 000000000000..ca304046d9e2
--- /dev/null
+++ b/kernel/power/autosleep.c
@@ -0,0 +1,127 @@ | |||
1 | /* | ||
2 | * kernel/power/autosleep.c | ||
3 | * | ||
4 | * Opportunistic sleep support. | ||
5 | * | ||
6 | * Copyright (C) 2012 Rafael J. Wysocki <rjw@sisk.pl> | ||
7 | */ | ||
8 | |||
9 | #include <linux/device.h> | ||
10 | #include <linux/mutex.h> | ||
11 | #include <linux/pm_wakeup.h> | ||
12 | |||
13 | #include "power.h" | ||
14 | |||
15 | static suspend_state_t autosleep_state; | ||
16 | static struct workqueue_struct *autosleep_wq; | ||
17 | /* | ||
18 | * Note: it is only safe to mutex_lock(&autosleep_lock) if a wakeup_source | ||
19 | * is active, otherwise a deadlock with try_to_suspend() is possible. | ||
20 | * Alternatively mutex_lock_interruptible() can be used. This will then fail | ||
21 | * if an auto_sleep cycle tries to freeze processes. | ||
22 | */ | ||
23 | static DEFINE_MUTEX(autosleep_lock); | ||
24 | static struct wakeup_source *autosleep_ws; | ||
25 | |||
26 | static void try_to_suspend(struct work_struct *work) | ||
27 | { | ||
28 | unsigned int initial_count, final_count; | ||
29 | |||
30 | if (!pm_get_wakeup_count(&initial_count, true)) | ||
31 | goto out; | ||
32 | |||
33 | mutex_lock(&autosleep_lock); | ||
34 | |||
35 | if (!pm_save_wakeup_count(initial_count)) { | ||
36 | mutex_unlock(&autosleep_lock); | ||
37 | goto out; | ||
38 | } | ||
39 | |||
40 | if (autosleep_state == PM_SUSPEND_ON) { | ||
41 | mutex_unlock(&autosleep_lock); | ||
42 | return; | ||
43 | } | ||
44 | if (autosleep_state >= PM_SUSPEND_MAX) | ||
45 | hibernate(); | ||
46 | else | ||
47 | pm_suspend(autosleep_state); | ||
48 | |||
49 | mutex_unlock(&autosleep_lock); | ||
50 | |||
51 | if (!pm_get_wakeup_count(&final_count, false)) | ||
52 | goto out; | ||
53 | |||
54 | /* | ||
55 | * If the wakeup occurred for an unknown reason, wait to prevent the | ||
56 | * system from trying to suspend and waking up in a tight loop. | ||
57 | */ | ||
58 | if (final_count == initial_count) | ||
59 | schedule_timeout_uninterruptible(HZ / 2); | ||
60 | |||
61 | out: | ||
62 | queue_up_suspend_work(); | ||
63 | } | ||
64 | |||
65 | static DECLARE_WORK(suspend_work, try_to_suspend); | ||
66 | |||
67 | void queue_up_suspend_work(void) | ||
68 | { | ||
69 | if (!work_pending(&suspend_work) && autosleep_state > PM_SUSPEND_ON) | ||
70 | queue_work(autosleep_wq, &suspend_work); | ||
71 | } | ||
72 | |||
73 | suspend_state_t pm_autosleep_state(void) | ||
74 | { | ||
75 | return autosleep_state; | ||
76 | } | ||
77 | |||
78 | int pm_autosleep_lock(void) | ||
79 | { | ||
80 | return mutex_lock_interruptible(&autosleep_lock); | ||
81 | } | ||
82 | |||
83 | void pm_autosleep_unlock(void) | ||
84 | { | ||
85 | mutex_unlock(&autosleep_lock); | ||
86 | } | ||
87 | |||
88 | int pm_autosleep_set_state(suspend_state_t state) | ||
89 | { | ||
90 | |||
91 | #ifndef CONFIG_HIBERNATION | ||
92 | if (state >= PM_SUSPEND_MAX) | ||
93 | return -EINVAL; | ||
94 | #endif | ||
95 | |||
96 | __pm_stay_awake(autosleep_ws); | ||
97 | |||
98 | mutex_lock(&autosleep_lock); | ||
99 | |||
100 | autosleep_state = state; | ||
101 | |||
102 | __pm_relax(autosleep_ws); | ||
103 | |||
104 | if (state > PM_SUSPEND_ON) { | ||
105 | pm_wakep_autosleep_enabled(true); | ||
106 | queue_up_suspend_work(); | ||
107 | } else { | ||
108 | pm_wakep_autosleep_enabled(false); | ||
109 | } | ||
110 | |||
111 | mutex_unlock(&autosleep_lock); | ||
112 | return 0; | ||
113 | } | ||
114 | |||
115 | int __init pm_autosleep_init(void) | ||
116 | { | ||
117 | autosleep_ws = wakeup_source_register("autosleep"); | ||
118 | if (!autosleep_ws) | ||
119 | return -ENOMEM; | ||
120 | |||
121 | autosleep_wq = alloc_ordered_workqueue("autosleep", 0); | ||
122 | if (autosleep_wq) | ||
123 | return 0; | ||
124 | |||
125 | wakeup_source_unregister(autosleep_ws); | ||
126 | return -ENOMEM; | ||
127 | } | ||
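For reference, a minimal user-space sketch (not part of this patch) of driving the opportunistic sleep mechanism above through the /sys/power/autosleep attribute added later in this series; the path and the "mem"/"off" state strings are assumptions based on decode_state() and power_attr(autosleep), and the example only works when CONFIG_SUSPEND and CONFIG_PM_AUTOSLEEP are set:

	/* Hypothetical user-space sketch: enable and disable autosleep. */
	#include <stdio.h>

	static int write_sysfs(const char *path, const char *val)
	{
		FILE *f = fopen(path, "w");

		if (!f)
			return -1;
		/* A short string such as "mem\n" or "off\n" selects the state. */
		if (fputs(val, f) == EOF) {
			fclose(f);
			return -1;
		}
		return fclose(f);
	}

	int main(void)
	{
		/* Start autosleep: the kernel suspends whenever no wakeup
		 * sources are active (see try_to_suspend() above). */
		if (write_sysfs("/sys/power/autosleep", "mem\n"))
			perror("enable autosleep");

		/* ... later, turn opportunistic sleep back off. */
		if (write_sysfs("/sys/power/autosleep", "off\n"))
			perror("disable autosleep");

		return 0;
	}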
diff --git a/kernel/power/main.c b/kernel/power/main.c index 1c12581f1c62..428f8a034e96 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c | |||
@@ -269,8 +269,7 @@ static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr, | |||
269 | return (s - buf); | 269 | return (s - buf); |
270 | } | 270 | } |
271 | 271 | ||
272 | static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr, | 272 | static suspend_state_t decode_state(const char *buf, size_t n) |
273 | const char *buf, size_t n) | ||
274 | { | 273 | { |
275 | #ifdef CONFIG_SUSPEND | 274 | #ifdef CONFIG_SUSPEND |
276 | suspend_state_t state = PM_SUSPEND_STANDBY; | 275 | suspend_state_t state = PM_SUSPEND_STANDBY; |
@@ -278,27 +277,48 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr, | |||
278 | #endif | 277 | #endif |
279 | char *p; | 278 | char *p; |
280 | int len; | 279 | int len; |
281 | int error = -EINVAL; | ||
282 | 280 | ||
283 | p = memchr(buf, '\n', n); | 281 | p = memchr(buf, '\n', n); |
284 | len = p ? p - buf : n; | 282 | len = p ? p - buf : n; |
285 | 283 | ||
286 | /* First, check if we are requested to hibernate */ | 284 | /* Check hibernation first. */ |
287 | if (len == 4 && !strncmp(buf, "disk", len)) { | 285 | if (len == 4 && !strncmp(buf, "disk", len)) |
288 | error = hibernate(); | 286 | return PM_SUSPEND_MAX; |
289 | goto Exit; | ||
290 | } | ||
291 | 287 | ||
292 | #ifdef CONFIG_SUSPEND | 288 | #ifdef CONFIG_SUSPEND |
293 | for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++) { | 289 | for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++) |
294 | if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) { | 290 | if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) |
295 | error = pm_suspend(state); | 291 | return state; |
296 | break; | ||
297 | } | ||
298 | } | ||
299 | #endif | 292 | #endif |
300 | 293 | ||
301 | Exit: | 294 | return PM_SUSPEND_ON; |
295 | } | ||
296 | |||
297 | static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr, | ||
298 | const char *buf, size_t n) | ||
299 | { | ||
300 | suspend_state_t state; | ||
301 | int error; | ||
302 | |||
303 | error = pm_autosleep_lock(); | ||
304 | if (error) | ||
305 | return error; | ||
306 | |||
307 | if (pm_autosleep_state() > PM_SUSPEND_ON) { | ||
308 | error = -EBUSY; | ||
309 | goto out; | ||
310 | } | ||
311 | |||
312 | state = decode_state(buf, n); | ||
313 | if (state < PM_SUSPEND_MAX) | ||
314 | error = pm_suspend(state); | ||
315 | else if (state == PM_SUSPEND_MAX) | ||
316 | error = hibernate(); | ||
317 | else | ||
318 | error = -EINVAL; | ||
319 | |||
320 | out: | ||
321 | pm_autosleep_unlock(); | ||
302 | return error ? error : n; | 322 | return error ? error : n; |
303 | } | 323 | } |
304 | 324 | ||
@@ -339,7 +359,8 @@ static ssize_t wakeup_count_show(struct kobject *kobj, | |||
339 | { | 359 | { |
340 | unsigned int val; | 360 | unsigned int val; |
341 | 361 | ||
342 | return pm_get_wakeup_count(&val) ? sprintf(buf, "%u\n", val) : -EINTR; | 362 | return pm_get_wakeup_count(&val, true) ? |
363 | sprintf(buf, "%u\n", val) : -EINTR; | ||
343 | } | 364 | } |
344 | 365 | ||
345 | static ssize_t wakeup_count_store(struct kobject *kobj, | 366 | static ssize_t wakeup_count_store(struct kobject *kobj, |
@@ -347,15 +368,106 @@ static ssize_t wakeup_count_store(struct kobject *kobj, | |||
347 | const char *buf, size_t n) | 368 | const char *buf, size_t n) |
348 | { | 369 | { |
349 | unsigned int val; | 370 | unsigned int val; |
371 | int error; | ||
372 | |||
373 | error = pm_autosleep_lock(); | ||
374 | if (error) | ||
375 | return error; | ||
376 | |||
377 | if (pm_autosleep_state() > PM_SUSPEND_ON) { | ||
378 | error = -EBUSY; | ||
379 | goto out; | ||
380 | } | ||
350 | 381 | ||
382 | error = -EINVAL; | ||
351 | if (sscanf(buf, "%u", &val) == 1) { | 383 | if (sscanf(buf, "%u", &val) == 1) { |
352 | if (pm_save_wakeup_count(val)) | 384 | if (pm_save_wakeup_count(val)) |
353 | return n; | 385 | error = n; |
354 | } | 386 | } |
355 | return -EINVAL; | 387 | |
388 | out: | ||
389 | pm_autosleep_unlock(); | ||
390 | return error; | ||
356 | } | 391 | } |
357 | 392 | ||
358 | power_attr(wakeup_count); | 393 | power_attr(wakeup_count); |
394 | |||
395 | #ifdef CONFIG_PM_AUTOSLEEP | ||
396 | static ssize_t autosleep_show(struct kobject *kobj, | ||
397 | struct kobj_attribute *attr, | ||
398 | char *buf) | ||
399 | { | ||
400 | suspend_state_t state = pm_autosleep_state(); | ||
401 | |||
402 | if (state == PM_SUSPEND_ON) | ||
403 | return sprintf(buf, "off\n"); | ||
404 | |||
405 | #ifdef CONFIG_SUSPEND | ||
406 | if (state < PM_SUSPEND_MAX) | ||
407 | return sprintf(buf, "%s\n", valid_state(state) ? | ||
408 | pm_states[state] : "error"); | ||
409 | #endif | ||
410 | #ifdef CONFIG_HIBERNATION | ||
411 | return sprintf(buf, "disk\n"); | ||
412 | #else | ||
413 | return sprintf(buf, "error"); | ||
414 | #endif | ||
415 | } | ||
416 | |||
417 | static ssize_t autosleep_store(struct kobject *kobj, | ||
418 | struct kobj_attribute *attr, | ||
419 | const char *buf, size_t n) | ||
420 | { | ||
421 | suspend_state_t state = decode_state(buf, n); | ||
422 | int error; | ||
423 | |||
424 | if (state == PM_SUSPEND_ON | ||
425 | && strcmp(buf, "off") && strcmp(buf, "off\n")) | ||
426 | return -EINVAL; | ||
427 | |||
428 | error = pm_autosleep_set_state(state); | ||
429 | return error ? error : n; | ||
430 | } | ||
431 | |||
432 | power_attr(autosleep); | ||
433 | #endif /* CONFIG_PM_AUTOSLEEP */ | ||
434 | |||
435 | #ifdef CONFIG_PM_WAKELOCKS | ||
436 | static ssize_t wake_lock_show(struct kobject *kobj, | ||
437 | struct kobj_attribute *attr, | ||
438 | char *buf) | ||
439 | { | ||
440 | return pm_show_wakelocks(buf, true); | ||
441 | } | ||
442 | |||
443 | static ssize_t wake_lock_store(struct kobject *kobj, | ||
444 | struct kobj_attribute *attr, | ||
445 | const char *buf, size_t n) | ||
446 | { | ||
447 | int error = pm_wake_lock(buf); | ||
448 | return error ? error : n; | ||
449 | } | ||
450 | |||
451 | power_attr(wake_lock); | ||
452 | |||
453 | static ssize_t wake_unlock_show(struct kobject *kobj, | ||
454 | struct kobj_attribute *attr, | ||
455 | char *buf) | ||
456 | { | ||
457 | return pm_show_wakelocks(buf, false); | ||
458 | } | ||
459 | |||
460 | static ssize_t wake_unlock_store(struct kobject *kobj, | ||
461 | struct kobj_attribute *attr, | ||
462 | const char *buf, size_t n) | ||
463 | { | ||
464 | int error = pm_wake_unlock(buf); | ||
465 | return error ? error : n; | ||
466 | } | ||
467 | |||
468 | power_attr(wake_unlock); | ||
469 | |||
470 | #endif /* CONFIG_PM_WAKELOCKS */ | ||
359 | #endif /* CONFIG_PM_SLEEP */ | 471 | #endif /* CONFIG_PM_SLEEP */ |
360 | 472 | ||
361 | #ifdef CONFIG_PM_TRACE | 473 | #ifdef CONFIG_PM_TRACE |
@@ -409,6 +521,13 @@ static struct attribute * g[] = { | |||
409 | #ifdef CONFIG_PM_SLEEP | 521 | #ifdef CONFIG_PM_SLEEP |
410 | &pm_async_attr.attr, | 522 | &pm_async_attr.attr, |
411 | &wakeup_count_attr.attr, | 523 | &wakeup_count_attr.attr, |
524 | #ifdef CONFIG_PM_AUTOSLEEP | ||
525 | &autosleep_attr.attr, | ||
526 | #endif | ||
527 | #ifdef CONFIG_PM_WAKELOCKS | ||
528 | &wake_lock_attr.attr, | ||
529 | &wake_unlock_attr.attr, | ||
530 | #endif | ||
412 | #ifdef CONFIG_PM_DEBUG | 531 | #ifdef CONFIG_PM_DEBUG |
413 | &pm_test_attr.attr, | 532 | &pm_test_attr.attr, |
414 | #endif | 533 | #endif |
@@ -444,7 +563,10 @@ static int __init pm_init(void) | |||
444 | power_kobj = kobject_create_and_add("power", NULL); | 563 | power_kobj = kobject_create_and_add("power", NULL); |
445 | if (!power_kobj) | 564 | if (!power_kobj) |
446 | return -ENOMEM; | 565 | return -ENOMEM; |
447 | return sysfs_create_group(power_kobj, &attr_group); | 566 | error = sysfs_create_group(power_kobj, &attr_group); |
567 | if (error) | ||
568 | return error; | ||
569 | return pm_autosleep_init(); | ||
448 | } | 570 | } |
449 | 571 | ||
450 | core_initcall(pm_init); | 572 | core_initcall(pm_init); |
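A hedged sketch of the user-space side of the wakeup_count handshake that the updated wakeup_count_show()/wakeup_count_store() and state_store() support: read /sys/power/wakeup_count, write the same value back, and only then write a state to /sys/power/state. A failed write-back means wakeup events occurred in between (or autosleep is active, per the new -EBUSY path) and the suspend attempt should be retried. Error handling is minimal and the "mem" state assumes CONFIG_SUSPEND:

	/* Hypothetical user-space sketch of the wakeup_count handshake. */
	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[32];
		int fd, n;

		fd = open("/sys/power/wakeup_count", O_RDWR);
		if (fd < 0)
			return 1;

		n = read(fd, buf, sizeof(buf) - 1);
		if (n <= 0)
			return 1;
		buf[n] = '\0';

		/* The write-back fails (-EINVAL/-EBUSY) if wakeup events have
		 * occurred since the read or if autosleep is enabled. */
		if (write(fd, buf, strlen(buf)) < 0) {
			close(fd);
			return 1;	/* retry later */
		}
		close(fd);

		/* Safe to request suspend now; this write blocks until resume. */
		fd = open("/sys/power/state", O_WRONLY);
		if (fd < 0)
			return 1;
		write(fd, "mem", 3);
		close(fd);
		return 0;
	}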
diff --git a/kernel/power/power.h b/kernel/power/power.h index 98f3622d7407..b0bd4beaebfe 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h | |||
@@ -264,3 +264,30 @@ static inline void suspend_thaw_processes(void) | |||
264 | { | 264 | { |
265 | } | 265 | } |
266 | #endif | 266 | #endif |
267 | |||
268 | #ifdef CONFIG_PM_AUTOSLEEP | ||
269 | |||
270 | /* kernel/power/autosleep.c */ | ||
271 | extern int pm_autosleep_init(void); | ||
272 | extern int pm_autosleep_lock(void); | ||
273 | extern void pm_autosleep_unlock(void); | ||
274 | extern suspend_state_t pm_autosleep_state(void); | ||
275 | extern int pm_autosleep_set_state(suspend_state_t state); | ||
276 | |||
277 | #else /* !CONFIG_PM_AUTOSLEEP */ | ||
278 | |||
279 | static inline int pm_autosleep_init(void) { return 0; } | ||
280 | static inline int pm_autosleep_lock(void) { return 0; } | ||
281 | static inline void pm_autosleep_unlock(void) {} | ||
282 | static inline suspend_state_t pm_autosleep_state(void) { return PM_SUSPEND_ON; } | ||
283 | |||
284 | #endif /* !CONFIG_PM_AUTOSLEEP */ | ||
285 | |||
286 | #ifdef CONFIG_PM_WAKELOCKS | ||
287 | |||
288 | /* kernel/power/wakelock.c */ | ||
289 | extern ssize_t pm_show_wakelocks(char *buf, bool show_active); | ||
290 | extern int pm_wake_lock(const char *buf); | ||
291 | extern int pm_wake_unlock(const char *buf); | ||
292 | |||
293 | #endif /* !CONFIG_PM_WAKELOCKS */ | ||
diff --git a/kernel/power/swap.c b/kernel/power/swap.c index eef311a58a64..11e22c068e8b 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c | |||
@@ -6,7 +6,7 @@ | |||
6 | * | 6 | * |
7 | * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz> | 7 | * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz> |
8 | * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl> | 8 | * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl> |
9 | * Copyright (C) 2010 Bojan Smojver <bojan@rexursive.com> | 9 | * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com> |
10 | * | 10 | * |
11 | * This file is released under the GPLv2. | 11 | * This file is released under the GPLv2. |
12 | * | 12 | * |
@@ -282,14 +282,17 @@ static int write_page(void *buf, sector_t offset, struct bio **bio_chain) | |||
282 | return -ENOSPC; | 282 | return -ENOSPC; |
283 | 283 | ||
284 | if (bio_chain) { | 284 | if (bio_chain) { |
285 | src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH); | 285 | src = (void *)__get_free_page(__GFP_WAIT | __GFP_NOWARN | |
286 | __GFP_NORETRY); | ||
286 | if (src) { | 287 | if (src) { |
287 | copy_page(src, buf); | 288 | copy_page(src, buf); |
288 | } else { | 289 | } else { |
289 | ret = hib_wait_on_bio_chain(bio_chain); /* Free pages */ | 290 | ret = hib_wait_on_bio_chain(bio_chain); /* Free pages */ |
290 | if (ret) | 291 | if (ret) |
291 | return ret; | 292 | return ret; |
292 | src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH); | 293 | src = (void *)__get_free_page(__GFP_WAIT | |
294 | __GFP_NOWARN | | ||
295 | __GFP_NORETRY); | ||
293 | if (src) { | 296 | if (src) { |
294 | copy_page(src, buf); | 297 | copy_page(src, buf); |
295 | } else { | 298 | } else { |
@@ -367,12 +370,17 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf, | |||
367 | clear_page(handle->cur); | 370 | clear_page(handle->cur); |
368 | handle->cur_swap = offset; | 371 | handle->cur_swap = offset; |
369 | handle->k = 0; | 372 | handle->k = 0; |
370 | } | 373 | |
371 | if (bio_chain && low_free_pages() <= handle->reqd_free_pages) { | 374 | if (bio_chain && low_free_pages() <= handle->reqd_free_pages) { |
372 | error = hib_wait_on_bio_chain(bio_chain); | 375 | error = hib_wait_on_bio_chain(bio_chain); |
373 | if (error) | 376 | if (error) |
374 | goto out; | 377 | goto out; |
375 | handle->reqd_free_pages = reqd_free_pages(); | 378 | /* |
379 | * Recalculate the number of required free pages, to | ||
380 | * make sure we never take more than half. | ||
381 | */ | ||
382 | handle->reqd_free_pages = reqd_free_pages(); | ||
383 | } | ||
376 | } | 384 | } |
377 | out: | 385 | out: |
378 | return error; | 386 | return error; |
@@ -419,8 +427,9 @@ static int swap_writer_finish(struct swap_map_handle *handle, | |||
419 | /* Maximum number of threads for compression/decompression. */ | 427 | /* Maximum number of threads for compression/decompression. */ |
420 | #define LZO_THREADS 3 | 428 | #define LZO_THREADS 3 |
421 | 429 | ||
422 | /* Maximum number of pages for read buffering. */ | 430 | /* Minimum/maximum number of pages for read buffering. */ |
423 | #define LZO_READ_PAGES (MAP_PAGE_ENTRIES * 8) | 431 | #define LZO_MIN_RD_PAGES 1024 |
432 | #define LZO_MAX_RD_PAGES 8192 | ||
424 | 433 | ||
425 | 434 | ||
426 | /** | 435 | /** |
@@ -631,12 +640,6 @@ static int save_image_lzo(struct swap_map_handle *handle, | |||
631 | } | 640 | } |
632 | 641 | ||
633 | /* | 642 | /* |
634 | * Adjust number of free pages after all allocations have been done. | ||
635 | * We don't want to run out of pages when writing. | ||
636 | */ | ||
637 | handle->reqd_free_pages = reqd_free_pages(); | ||
638 | |||
639 | /* | ||
640 | * Start the CRC32 thread. | 643 | * Start the CRC32 thread. |
641 | */ | 644 | */ |
642 | init_waitqueue_head(&crc->go); | 645 | init_waitqueue_head(&crc->go); |
@@ -657,6 +660,12 @@ static int save_image_lzo(struct swap_map_handle *handle, | |||
657 | goto out_clean; | 660 | goto out_clean; |
658 | } | 661 | } |
659 | 662 | ||
663 | /* | ||
664 | * Adjust the number of required free pages after all allocations have | ||
665 | * been done. We don't want to run out of pages when writing. | ||
666 | */ | ||
667 | handle->reqd_free_pages = reqd_free_pages(); | ||
668 | |||
660 | printk(KERN_INFO | 669 | printk(KERN_INFO |
661 | "PM: Using %u thread(s) for compression.\n" | 670 | "PM: Using %u thread(s) for compression.\n" |
662 | "PM: Compressing and saving image data (%u pages) ... ", | 671 | "PM: Compressing and saving image data (%u pages) ... ", |
@@ -1067,7 +1076,7 @@ static int load_image_lzo(struct swap_map_handle *handle, | |||
1067 | unsigned i, thr, run_threads, nr_threads; | 1076 | unsigned i, thr, run_threads, nr_threads; |
1068 | unsigned ring = 0, pg = 0, ring_size = 0, | 1077 | unsigned ring = 0, pg = 0, ring_size = 0, |
1069 | have = 0, want, need, asked = 0; | 1078 | have = 0, want, need, asked = 0; |
1070 | unsigned long read_pages; | 1079 | unsigned long read_pages = 0; |
1071 | unsigned char **page = NULL; | 1080 | unsigned char **page = NULL; |
1072 | struct dec_data *data = NULL; | 1081 | struct dec_data *data = NULL; |
1073 | struct crc_data *crc = NULL; | 1082 | struct crc_data *crc = NULL; |
@@ -1079,7 +1088,7 @@ static int load_image_lzo(struct swap_map_handle *handle, | |||
1079 | nr_threads = num_online_cpus() - 1; | 1088 | nr_threads = num_online_cpus() - 1; |
1080 | nr_threads = clamp_val(nr_threads, 1, LZO_THREADS); | 1089 | nr_threads = clamp_val(nr_threads, 1, LZO_THREADS); |
1081 | 1090 | ||
1082 | page = vmalloc(sizeof(*page) * LZO_READ_PAGES); | 1091 | page = vmalloc(sizeof(*page) * LZO_MAX_RD_PAGES); |
1083 | if (!page) { | 1092 | if (!page) { |
1084 | printk(KERN_ERR "PM: Failed to allocate LZO page\n"); | 1093 | printk(KERN_ERR "PM: Failed to allocate LZO page\n"); |
1085 | ret = -ENOMEM; | 1094 | ret = -ENOMEM; |
@@ -1144,15 +1153,22 @@ static int load_image_lzo(struct swap_map_handle *handle, | |||
1144 | } | 1153 | } |
1145 | 1154 | ||
1146 | /* | 1155 | /* |
1147 | * Adjust number of pages for read buffering, in case we are short. | 1156 | * Set the number of pages for read buffering. |
1157 | * This is complete guesswork, because we'll only know the real | ||
1158 | * picture once prepare_image() is called, which is much later on | ||
1159 | * during the image load phase. We'll assume the worst case and | ||
1160 | * say that none of the image pages are from high memory. | ||
1148 | */ | 1161 | */ |
1149 | read_pages = (nr_free_pages() - snapshot_get_image_size()) >> 1; | 1162 | if (low_free_pages() > snapshot_get_image_size()) |
1150 | read_pages = clamp_val(read_pages, LZO_CMP_PAGES, LZO_READ_PAGES); | 1163 | read_pages = (low_free_pages() - snapshot_get_image_size()) / 2; |
1164 | read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES); | ||
1151 | 1165 | ||
1152 | for (i = 0; i < read_pages; i++) { | 1166 | for (i = 0; i < read_pages; i++) { |
1153 | page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ? | 1167 | page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ? |
1154 | __GFP_WAIT | __GFP_HIGH : | 1168 | __GFP_WAIT | __GFP_HIGH : |
1155 | __GFP_WAIT); | 1169 | __GFP_WAIT | __GFP_NOWARN | |
1170 | __GFP_NORETRY); | ||
1171 | |||
1156 | if (!page[i]) { | 1172 | if (!page[i]) { |
1157 | if (i < LZO_CMP_PAGES) { | 1173 | if (i < LZO_CMP_PAGES) { |
1158 | ring_size = i; | 1174 | ring_size = i; |
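A stand-alone sketch (with made-up numbers) of the new read-buffer sizing in load_image_lzo() above: take half of the low-memory headroom over the image size, if any, and clamp the result to [LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES]:

	/* Illustration only; the inputs below are invented. */
	#include <stdio.h>

	#define LZO_MIN_RD_PAGES 1024
	#define LZO_MAX_RD_PAGES 8192

	static unsigned long rd_pages(unsigned long low_free, unsigned long image_size)
	{
		unsigned long pages = 0;

		if (low_free > image_size)
			pages = (low_free - image_size) / 2;
		/* Clamp, as clamp_val() does in the kernel code. */
		if (pages < LZO_MIN_RD_PAGES)
			pages = LZO_MIN_RD_PAGES;
		if (pages > LZO_MAX_RD_PAGES)
			pages = LZO_MAX_RD_PAGES;
		return pages;
	}

	int main(void)
	{
		/* Plenty of headroom: (200000 - 150000) / 2 = 25000 -> capped at 8192. */
		printf("%lu\n", rd_pages(200000, 150000));
		/* Little headroom: 500 -> raised to the minimum of 1024. */
		printf("%lu\n", rd_pages(100000, 99000));
		return 0;
	}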
diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c new file mode 100644 index 000000000000..c8fba3380076 --- /dev/null +++ b/kernel/power/wakelock.c | |||
@@ -0,0 +1,259 @@ | |||
1 | /* | ||
2 | * kernel/power/wakelock.c | ||
3 | * | ||
4 | * User space wakeup sources support. | ||
5 | * | ||
6 | * Copyright (C) 2012 Rafael J. Wysocki <rjw@sisk.pl> | ||
7 | * | ||
8 | * This code is based on the analogous interface allowing user space to | ||
9 | * manipulate wakelocks on Android. | ||
10 | */ | ||
11 | |||
12 | #include <linux/ctype.h> | ||
13 | #include <linux/device.h> | ||
14 | #include <linux/err.h> | ||
15 | #include <linux/hrtimer.h> | ||
16 | #include <linux/list.h> | ||
17 | #include <linux/rbtree.h> | ||
18 | #include <linux/slab.h> | ||
19 | |||
20 | static DEFINE_MUTEX(wakelocks_lock); | ||
21 | |||
22 | struct wakelock { | ||
23 | char *name; | ||
24 | struct rb_node node; | ||
25 | struct wakeup_source ws; | ||
26 | #ifdef CONFIG_PM_WAKELOCKS_GC | ||
27 | struct list_head lru; | ||
28 | #endif | ||
29 | }; | ||
30 | |||
31 | static struct rb_root wakelocks_tree = RB_ROOT; | ||
32 | |||
33 | ssize_t pm_show_wakelocks(char *buf, bool show_active) | ||
34 | { | ||
35 | struct rb_node *node; | ||
36 | struct wakelock *wl; | ||
37 | char *str = buf; | ||
38 | char *end = buf + PAGE_SIZE; | ||
39 | |||
40 | mutex_lock(&wakelocks_lock); | ||
41 | |||
42 | for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) { | ||
43 | wl = rb_entry(node, struct wakelock, node); | ||
44 | if (wl->ws.active == show_active) | ||
45 | str += scnprintf(str, end - str, "%s ", wl->name); | ||
46 | } | ||
47 | if (str > buf) | ||
48 | str--; | ||
49 | |||
50 | str += scnprintf(str, end - str, "\n"); | ||
51 | |||
52 | mutex_unlock(&wakelocks_lock); | ||
53 | return (str - buf); | ||
54 | } | ||
55 | |||
56 | #if CONFIG_PM_WAKELOCKS_LIMIT > 0 | ||
57 | static unsigned int number_of_wakelocks; | ||
58 | |||
59 | static inline bool wakelocks_limit_exceeded(void) | ||
60 | { | ||
61 | return number_of_wakelocks > CONFIG_PM_WAKELOCKS_LIMIT; | ||
62 | } | ||
63 | |||
64 | static inline void increment_wakelocks_number(void) | ||
65 | { | ||
66 | number_of_wakelocks++; | ||
67 | } | ||
68 | |||
69 | static inline void decrement_wakelocks_number(void) | ||
70 | { | ||
71 | number_of_wakelocks--; | ||
72 | } | ||
73 | #else /* CONFIG_PM_WAKELOCKS_LIMIT = 0 */ | ||
74 | static inline bool wakelocks_limit_exceeded(void) { return false; } | ||
75 | static inline void increment_wakelocks_number(void) {} | ||
76 | static inline void decrement_wakelocks_number(void) {} | ||
77 | #endif /* CONFIG_PM_WAKELOCKS_LIMIT */ | ||
78 | |||
79 | #ifdef CONFIG_PM_WAKELOCKS_GC | ||
80 | #define WL_GC_COUNT_MAX 100 | ||
81 | #define WL_GC_TIME_SEC 300 | ||
82 | |||
83 | static LIST_HEAD(wakelocks_lru_list); | ||
84 | static unsigned int wakelocks_gc_count; | ||
85 | |||
86 | static inline void wakelocks_lru_add(struct wakelock *wl) | ||
87 | { | ||
88 | list_add(&wl->lru, &wakelocks_lru_list); | ||
89 | } | ||
90 | |||
91 | static inline void wakelocks_lru_most_recent(struct wakelock *wl) | ||
92 | { | ||
93 | list_move(&wl->lru, &wakelocks_lru_list); | ||
94 | } | ||
95 | |||
96 | static void wakelocks_gc(void) | ||
97 | { | ||
98 | struct wakelock *wl, *aux; | ||
99 | ktime_t now; | ||
100 | |||
101 | if (++wakelocks_gc_count <= WL_GC_COUNT_MAX) | ||
102 | return; | ||
103 | |||
104 | now = ktime_get(); | ||
105 | list_for_each_entry_safe_reverse(wl, aux, &wakelocks_lru_list, lru) { | ||
106 | u64 idle_time_ns; | ||
107 | bool active; | ||
108 | |||
109 | spin_lock_irq(&wl->ws.lock); | ||
110 | idle_time_ns = ktime_to_ns(ktime_sub(now, wl->ws.last_time)); | ||
111 | active = wl->ws.active; | ||
112 | spin_unlock_irq(&wl->ws.lock); | ||
113 | |||
114 | if (idle_time_ns < ((u64)WL_GC_TIME_SEC * NSEC_PER_SEC)) | ||
115 | break; | ||
116 | |||
117 | if (!active) { | ||
118 | wakeup_source_remove(&wl->ws); | ||
119 | rb_erase(&wl->node, &wakelocks_tree); | ||
120 | list_del(&wl->lru); | ||
121 | kfree(wl->name); | ||
122 | kfree(wl); | ||
123 | decrement_wakelocks_number(); | ||
124 | } | ||
125 | } | ||
126 | wakelocks_gc_count = 0; | ||
127 | } | ||
128 | #else /* !CONFIG_PM_WAKELOCKS_GC */ | ||
129 | static inline void wakelocks_lru_add(struct wakelock *wl) {} | ||
130 | static inline void wakelocks_lru_most_recent(struct wakelock *wl) {} | ||
131 | static inline void wakelocks_gc(void) {} | ||
132 | #endif /* !CONFIG_PM_WAKELOCKS_GC */ | ||
133 | |||
134 | static struct wakelock *wakelock_lookup_add(const char *name, size_t len, | ||
135 | bool add_if_not_found) | ||
136 | { | ||
137 | struct rb_node **node = &wakelocks_tree.rb_node; | ||
138 | struct rb_node *parent = *node; | ||
139 | struct wakelock *wl; | ||
140 | |||
141 | while (*node) { | ||
142 | int diff; | ||
143 | |||
144 | parent = *node; | ||
145 | wl = rb_entry(*node, struct wakelock, node); | ||
146 | diff = strncmp(name, wl->name, len); | ||
147 | if (diff == 0) { | ||
148 | if (wl->name[len]) | ||
149 | diff = -1; | ||
150 | else | ||
151 | return wl; | ||
152 | } | ||
153 | if (diff < 0) | ||
154 | node = &(*node)->rb_left; | ||
155 | else | ||
156 | node = &(*node)->rb_right; | ||
157 | } | ||
158 | if (!add_if_not_found) | ||
159 | return ERR_PTR(-EINVAL); | ||
160 | |||
161 | if (wakelocks_limit_exceeded()) | ||
162 | return ERR_PTR(-ENOSPC); | ||
163 | |||
164 | /* Not found, we have to add a new one. */ | ||
165 | wl = kzalloc(sizeof(*wl), GFP_KERNEL); | ||
166 | if (!wl) | ||
167 | return ERR_PTR(-ENOMEM); | ||
168 | |||
169 | wl->name = kstrndup(name, len, GFP_KERNEL); | ||
170 | if (!wl->name) { | ||
171 | kfree(wl); | ||
172 | return ERR_PTR(-ENOMEM); | ||
173 | } | ||
174 | wl->ws.name = wl->name; | ||
175 | wakeup_source_add(&wl->ws); | ||
176 | rb_link_node(&wl->node, parent, node); | ||
177 | rb_insert_color(&wl->node, &wakelocks_tree); | ||
178 | wakelocks_lru_add(wl); | ||
179 | increment_wakelocks_number(); | ||
180 | return wl; | ||
181 | } | ||
182 | |||
183 | int pm_wake_lock(const char *buf) | ||
184 | { | ||
185 | const char *str = buf; | ||
186 | struct wakelock *wl; | ||
187 | u64 timeout_ns = 0; | ||
188 | size_t len; | ||
189 | int ret = 0; | ||
190 | |||
191 | while (*str && !isspace(*str)) | ||
192 | str++; | ||
193 | |||
194 | len = str - buf; | ||
195 | if (!len) | ||
196 | return -EINVAL; | ||
197 | |||
198 | if (*str && *str != '\n') { | ||
199 | /* Find out if there's a valid timeout string appended. */ | ||
200 | ret = kstrtou64(skip_spaces(str), 10, &timeout_ns); | ||
201 | if (ret) | ||
202 | return -EINVAL; | ||
203 | } | ||
204 | |||
205 | mutex_lock(&wakelocks_lock); | ||
206 | |||
207 | wl = wakelock_lookup_add(buf, len, true); | ||
208 | if (IS_ERR(wl)) { | ||
209 | ret = PTR_ERR(wl); | ||
210 | goto out; | ||
211 | } | ||
212 | if (timeout_ns) { | ||
213 | u64 timeout_ms = timeout_ns + NSEC_PER_MSEC - 1; | ||
214 | |||
215 | do_div(timeout_ms, NSEC_PER_MSEC); | ||
216 | __pm_wakeup_event(&wl->ws, timeout_ms); | ||
217 | } else { | ||
218 | __pm_stay_awake(&wl->ws); | ||
219 | } | ||
220 | |||
221 | wakelocks_lru_most_recent(wl); | ||
222 | |||
223 | out: | ||
224 | mutex_unlock(&wakelocks_lock); | ||
225 | return ret; | ||
226 | } | ||
227 | |||
228 | int pm_wake_unlock(const char *buf) | ||
229 | { | ||
230 | struct wakelock *wl; | ||
231 | size_t len; | ||
232 | int ret = 0; | ||
233 | |||
234 | len = strlen(buf); | ||
235 | if (!len) | ||
236 | return -EINVAL; | ||
237 | |||
238 | if (buf[len-1] == '\n') | ||
239 | len--; | ||
240 | |||
241 | if (!len) | ||
242 | return -EINVAL; | ||
243 | |||
244 | mutex_lock(&wakelocks_lock); | ||
245 | |||
246 | wl = wakelock_lookup_add(buf, len, false); | ||
247 | if (IS_ERR(wl)) { | ||
248 | ret = PTR_ERR(wl); | ||
249 | goto out; | ||
250 | } | ||
251 | __pm_relax(&wl->ws); | ||
252 | |||
253 | wakelocks_lru_most_recent(wl); | ||
254 | wakelocks_gc(); | ||
255 | |||
256 | out: | ||
257 | mutex_unlock(&wakelocks_lock); | ||
258 | return ret; | ||
259 | } | ||
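A hypothetical user-space sketch (not part of this patch) of the string format accepted by the new /sys/power/wake_lock and /sys/power/wake_unlock attributes, as parsed by pm_wake_lock() and pm_wake_unlock(): a wakeup source name, optionally followed by a timeout in nanoseconds; the "myapp" name is made up for illustration:

	/* Hypothetical user-space sketch of the wake_lock string format. */
	#include <stdio.h>

	static int write_str(const char *path, const char *val)
	{
		FILE *f = fopen(path, "w");

		if (!f)
			return -1;
		fputs(val, f);
		return fclose(f);
	}

	int main(void)
	{
		/* Acquire "myapp" for 5 s (5000000000 ns); pm_wake_lock()
		 * rounds the timeout up to milliseconds. */
		write_str("/sys/power/wake_lock", "myapp 5000000000\n");

		/* Or acquire it with no timeout ... */
		write_str("/sys/power/wake_lock", "myapp\n");

		/* ... and release it explicitly by name. */
		write_str("/sys/power/wake_unlock", "myapp\n");

		return 0;
	}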