commit    468f4d1a855f8039dabf441b8bf68cae264033ff (patch)
tree      303ac5bc1ac3f86f136a30f9356e84f20dcbf13f /kernel
parent    eb2689e06b3526c7684b09beecf26070f05ee825 (diff)
parent    8714c8d74d313c3ba27bf9c2aaacb1ad71c644f8 (diff)
author    Linus Torvalds <torvalds@linux-foundation.org>  2012-05-23 17:07:06 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-05-23 17:07:06 -0400
Merge tag 'pm-for-3.5' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull power management updates from Rafael Wysocki:

 - Implementation of opportunistic suspend (autosleep) and a user space
   interface for manipulating wakeup sources.

 - Hibernate updates from Bojan Smojver and Minho Ban.

 - Updates of the runtime PM core and generic PM domains framework
   related to PM QoS.

 - Assorted fixes.

* tag 'pm-for-3.5' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (25 commits)
  epoll: Fix user space breakage related to EPOLLWAKEUP
  PM / Domains: Make it possible to add devices to inactive domains
  PM / Hibernate: Use get_gendisk to verify partition if resume_file is integer format
  PM / Domains: Fix computation of maximum domain off time
  PM / Domains: Fix link checking when add subdomain
  PM / Sleep: User space wakeup sources garbage collector Kconfig option
  PM / Sleep: Make the limit of user space wakeup sources configurable
  PM / Documentation: suspend-and-cpuhotplug.txt: Fix typo
  PM / Domains: Cache device stop and domain power off governor results, v3
  PM / Domains: Make device removal more straightforward
  PM / Sleep: Fix a mistake in a conditional in autosleep_store()
  epoll: Add a flag, EPOLLWAKEUP, to prevent suspend while epoll events are ready
  PM / QoS: Create device constraints objects on notifier registration
  PM / Runtime: Remove device fields related to suspend time, v2
  PM / Domains: Rework default domain power off governor function, v2
  PM / Domains: Rework default device stop governor function, v2
  PM / Sleep: Add user space interface for manipulating wakeup sources, v3
  PM / Sleep: Add "prevent autosleep time" statistics to wakeup sources
  PM / Sleep: Implement opportunistic sleep, v2
  PM / Sleep: Add wakeup_source_activate and wakeup_source_deactivate tracepoints
  ...
Diffstat (limited to 'kernel')

-rw-r--r--  kernel/power/Kconfig     |  27
-rw-r--r--  kernel/power/Makefile    |   2
-rw-r--r--  kernel/power/autosleep.c | 127
-rw-r--r--  kernel/power/hibernate.c |  13
-rw-r--r--  kernel/power/main.c      | 160
-rw-r--r--  kernel/power/power.h     |  27
-rw-r--r--  kernel/power/swap.c      |  62
-rw-r--r--  kernel/power/wakelock.c  | 259

8 files changed, 635 insertions(+), 42 deletions(-)
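Taken together, the diffs below add three new sysfs files under /sys/power
(autosleep, wake_lock, wake_unlock) next to the existing state and
wakeup_count attributes. A minimal user-space sketch of how they combine,
for orientation only — the lock name, the 5-second timeout and the lack of
error handling are illustrative assumptions, not part of this merge:

/* Hypothetical user-space sketch, not part of the patch. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Write a string to a sysfs file; returns 0 on success. */
static int sysfs_write(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	ssize_t n = write(fd, val, strlen(val));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	/* Grab a wakeup source named "example", auto-released after 5 s
	   (the timeout is in nanoseconds, per pm_wake_lock() below). */
	sysfs_write("/sys/power/wake_lock", "example 5000000000");

	/* ... do work that must not race with system suspend ... */

	sysfs_write("/sys/power/wake_unlock", "example");

	/* Let the kernel opportunistically suspend to RAM whenever no
	   wakeup sources are active (requires CONFIG_PM_AUTOSLEEP). */
	return sysfs_write("/sys/power/autosleep", "mem");
}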
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index deb5461e321..8f9b4eb974e 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -103,6 +103,33 @@ config PM_SLEEP_SMP
 	select HOTPLUG
 	select HOTPLUG_CPU
 
+config PM_AUTOSLEEP
+	bool "Opportunistic sleep"
+	depends on PM_SLEEP
+	default n
+	---help---
+	  Allow the kernel to trigger a system transition into a global sleep
+	  state automatically whenever there are no active wakeup sources.
+
+config PM_WAKELOCKS
+	bool "User space wakeup sources interface"
+	depends on PM_SLEEP
+	default n
+	---help---
+	  Allow user space to create, activate and deactivate wakeup source
+	  objects with the help of a sysfs-based interface.
+
+config PM_WAKELOCKS_LIMIT
+	int "Maximum number of user space wakeup sources (0 = no limit)"
+	range 0 100000
+	default 100
+	depends on PM_WAKELOCKS
+
+config PM_WAKELOCKS_GC
+	bool "Garbage collector for user space wakeup sources"
+	depends on PM_WAKELOCKS
+	default y
+
 config PM_RUNTIME
 	bool "Run-time PM core functionality"
 	depends on !IA64_HP_SIM
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index 66d808ec525..29472bff11e 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -9,5 +9,7 @@ obj-$(CONFIG_SUSPEND) += suspend.o
 obj-$(CONFIG_PM_TEST_SUSPEND)	+= suspend_test.o
 obj-$(CONFIG_HIBERNATION)	+= hibernate.o snapshot.o swap.o user.o \
 				   block_io.o
+obj-$(CONFIG_PM_AUTOSLEEP)	+= autosleep.o
+obj-$(CONFIG_PM_WAKELOCKS)	+= wakelock.o
 
 obj-$(CONFIG_MAGIC_SYSRQ)	+= poweroff.o
diff --git a/kernel/power/autosleep.c b/kernel/power/autosleep.c
new file mode 100644
index 00000000000..ca304046d9e
--- /dev/null
+++ b/kernel/power/autosleep.c
@@ -0,0 +1,127 @@
+/*
+ * kernel/power/autosleep.c
+ *
+ * Opportunistic sleep support.
+ *
+ * Copyright (C) 2012 Rafael J. Wysocki <rjw@sisk.pl>
+ */
+
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/pm_wakeup.h>
+
+#include "power.h"
+
+static suspend_state_t autosleep_state;
+static struct workqueue_struct *autosleep_wq;
+/*
+ * Note: it is only safe to mutex_lock(&autosleep_lock) if a wakeup_source
+ * is active, otherwise a deadlock with try_to_suspend() is possible.
+ * Alternatively mutex_lock_interruptible() can be used.  This will then fail
+ * if an autosleep cycle tries to freeze processes.
+ */
+static DEFINE_MUTEX(autosleep_lock);
+static struct wakeup_source *autosleep_ws;
+
+static void try_to_suspend(struct work_struct *work)
+{
+	unsigned int initial_count, final_count;
+
+	if (!pm_get_wakeup_count(&initial_count, true))
+		goto out;
+
+	mutex_lock(&autosleep_lock);
+
+	if (!pm_save_wakeup_count(initial_count)) {
+		mutex_unlock(&autosleep_lock);
+		goto out;
+	}
+
+	if (autosleep_state == PM_SUSPEND_ON) {
+		mutex_unlock(&autosleep_lock);
+		return;
+	}
+	if (autosleep_state >= PM_SUSPEND_MAX)
+		hibernate();
+	else
+		pm_suspend(autosleep_state);
+
+	mutex_unlock(&autosleep_lock);
+
+	if (!pm_get_wakeup_count(&final_count, false))
+		goto out;
+
+	/*
+	 * If the wakeup occurred for an unknown reason, wait to prevent the
+	 * system from trying to suspend and waking up in a tight loop.
+	 */
+	if (final_count == initial_count)
+		schedule_timeout_uninterruptible(HZ / 2);
+
+ out:
+	queue_up_suspend_work();
+}
+
+static DECLARE_WORK(suspend_work, try_to_suspend);
+
+void queue_up_suspend_work(void)
+{
+	if (!work_pending(&suspend_work) && autosleep_state > PM_SUSPEND_ON)
+		queue_work(autosleep_wq, &suspend_work);
+}
+
+suspend_state_t pm_autosleep_state(void)
+{
+	return autosleep_state;
+}
+
+int pm_autosleep_lock(void)
+{
+	return mutex_lock_interruptible(&autosleep_lock);
+}
+
+void pm_autosleep_unlock(void)
+{
+	mutex_unlock(&autosleep_lock);
+}
+
+int pm_autosleep_set_state(suspend_state_t state)
+{
+
+#ifndef CONFIG_HIBERNATION
+	if (state >= PM_SUSPEND_MAX)
+		return -EINVAL;
+#endif
+
+	__pm_stay_awake(autosleep_ws);
+
+	mutex_lock(&autosleep_lock);
+
+	autosleep_state = state;
+
+	__pm_relax(autosleep_ws);
+
+	if (state > PM_SUSPEND_ON) {
+		pm_wakep_autosleep_enabled(true);
+		queue_up_suspend_work();
+	} else {
+		pm_wakep_autosleep_enabled(false);
+	}
+
+	mutex_unlock(&autosleep_lock);
+	return 0;
+}
+
+int __init pm_autosleep_init(void)
+{
+	autosleep_ws = wakeup_source_register("autosleep");
+	if (!autosleep_ws)
+		return -ENOMEM;
+
+	autosleep_wq = alloc_ordered_workqueue("autosleep", 0);
+	if (autosleep_wq)
+		return 0;
+
+	wakeup_source_unregister(autosleep_ws);
+	return -ENOMEM;
+}
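try_to_suspend() above is deliberately race-free: it samples the system
wakeup count before committing (pm_get_wakeup_count()) and re-checks it with
pm_save_wakeup_count(), so any wakeup event arriving in between aborts the
attempt. User space can follow the same handshake through the wakeup_count
attribute reworked in main.c further down. A hedged sketch, assuming
suspend-to-RAM ("mem") is the target state and leaving retry policy to the
caller:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int suspend_without_races(void)
{
	char count[32];
	int fd, n;

	/* Read the current wakeup count; the kernel side blocks while
	   events are in flight (pm_get_wakeup_count(..., true)). */
	fd = open("/sys/power/wakeup_count", O_RDONLY);
	if (fd < 0)
		return -1;
	n = read(fd, count, sizeof(count) - 1);
	close(fd);
	if (n <= 0)
		return -1;
	count[n] = '\0';

	/* Write it back; this fails if new wakeup events arrived in the
	   meantime (pm_save_wakeup_count()), in which case retry later. */
	fd = open("/sys/power/wakeup_count", O_WRONLY);
	if (fd < 0)
		return -1;
	if (write(fd, count, strlen(count)) < 0) {
		close(fd);
		return -1;
	}
	close(fd);

	/* Now suspend; the kernel aborts if the count changes first. */
	fd = open("/sys/power/state", O_WRONLY);
	if (fd < 0)
		return -1;
	n = write(fd, "mem", 3);
	close(fd);
	return n < 0 ? -1 : 0;
}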
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index e09dfbfeece..8b53db38a27 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -25,6 +25,8 @@
 #include <linux/freezer.h>
 #include <linux/gfp.h>
 #include <linux/syscore_ops.h>
+#include <linux/ctype.h>
+#include <linux/genhd.h>
 #include <scsi/scsi_scan.h>
 
 #include "power.h"
@@ -722,6 +724,17 @@ static int software_resume(void)
 
 	/* Check if the device is there */
 	swsusp_resume_device = name_to_dev_t(resume_file);
+
+	/*
+	 * name_to_dev_t() cannot verify the partition if resume_file is in
+	 * integer format (e.g. major:minor).
+	 */
+	if (isdigit(resume_file[0]) && resume_wait) {
+		int partno;
+		while (!get_gendisk(swsusp_resume_device, &partno))
+			msleep(10);
+	}
+
 	if (!swsusp_resume_device) {
 		/*
 		 * Some device discovery might still be in progress; we need
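For context on the hunk above: resume= usually names a device node, but it
may also be given in integer major:minor form, which name_to_dev_t() decodes
without being able to confirm that the partition exists yet — hence the
polling loop when resumewait is set. A trivial user-space illustration of
that form (the device numbers are examples, not from this patch):

#include <stdio.h>

int main(void)
{
	unsigned int major, minor;
	const char *resume_file = "8:3";	/* e.g. third partition of sda */

	/* Same "major:minor" shape the isdigit() check above recognizes. */
	if (sscanf(resume_file, "%u:%u", &major, &minor) == 2)
		printf("resume device = major %u, minor %u\n", major, minor);
	return 0;
}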
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 1c12581f1c6..428f8a034e9 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -269,8 +269,7 @@ static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
 	return (s - buf);
 }
 
-static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
-			   const char *buf, size_t n)
+static suspend_state_t decode_state(const char *buf, size_t n)
 {
 #ifdef CONFIG_SUSPEND
 	suspend_state_t state = PM_SUSPEND_STANDBY;
@@ -278,27 +277,48 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
 #endif
 	char *p;
 	int len;
-	int error = -EINVAL;
 
 	p = memchr(buf, '\n', n);
 	len = p ? p - buf : n;
 
-	/* First, check if we are requested to hibernate */
-	if (len == 4 && !strncmp(buf, "disk", len)) {
-		error = hibernate();
-		goto Exit;
-	}
+	/* Check hibernation first. */
+	if (len == 4 && !strncmp(buf, "disk", len))
+		return PM_SUSPEND_MAX;
 
 #ifdef CONFIG_SUSPEND
-	for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++) {
-		if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) {
-			error = pm_suspend(state);
-			break;
-		}
-	}
+	for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++)
+		if (*s && len == strlen(*s) && !strncmp(buf, *s, len))
+			return state;
 #endif
 
- Exit:
+	return PM_SUSPEND_ON;
+}
+
+static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
+			   const char *buf, size_t n)
+{
+	suspend_state_t state;
+	int error;
+
+	error = pm_autosleep_lock();
+	if (error)
+		return error;
+
+	if (pm_autosleep_state() > PM_SUSPEND_ON) {
+		error = -EBUSY;
+		goto out;
+	}
+
+	state = decode_state(buf, n);
+	if (state < PM_SUSPEND_MAX)
+		error = pm_suspend(state);
+	else if (state == PM_SUSPEND_MAX)
+		error = hibernate();
+	else
+		error = -EINVAL;
+
+ out:
+	pm_autosleep_unlock();
 	return error ? error : n;
 }
 
@@ -339,7 +359,8 @@ static ssize_t wakeup_count_show(struct kobject *kobj,
 {
 	unsigned int val;
 
-	return pm_get_wakeup_count(&val) ? sprintf(buf, "%u\n", val) : -EINTR;
+	return pm_get_wakeup_count(&val, true) ?
+		sprintf(buf, "%u\n", val) : -EINTR;
 }
 
 static ssize_t wakeup_count_store(struct kobject *kobj,
@@ -347,15 +368,106 @@ static ssize_t wakeup_count_store(struct kobject *kobj,
 				   const char *buf, size_t n)
 {
 	unsigned int val;
+	int error;
+
+	error = pm_autosleep_lock();
+	if (error)
+		return error;
+
+	if (pm_autosleep_state() > PM_SUSPEND_ON) {
+		error = -EBUSY;
+		goto out;
+	}
 
+	error = -EINVAL;
 	if (sscanf(buf, "%u", &val) == 1) {
 		if (pm_save_wakeup_count(val))
-			return n;
+			error = n;
 	}
-	return -EINVAL;
+
+ out:
+	pm_autosleep_unlock();
+	return error;
 }
 
 power_attr(wakeup_count);
+
+#ifdef CONFIG_PM_AUTOSLEEP
+static ssize_t autosleep_show(struct kobject *kobj,
+			      struct kobj_attribute *attr,
+			      char *buf)
+{
+	suspend_state_t state = pm_autosleep_state();
+
+	if (state == PM_SUSPEND_ON)
+		return sprintf(buf, "off\n");
+
+#ifdef CONFIG_SUSPEND
+	if (state < PM_SUSPEND_MAX)
+		return sprintf(buf, "%s\n", valid_state(state) ?
+						pm_states[state] : "error");
+#endif
+#ifdef CONFIG_HIBERNATION
+	return sprintf(buf, "disk\n");
+#else
+	return sprintf(buf, "error");
+#endif
+}
+
+static ssize_t autosleep_store(struct kobject *kobj,
+			       struct kobj_attribute *attr,
+			       const char *buf, size_t n)
+{
+	suspend_state_t state = decode_state(buf, n);
+	int error;
+
+	if (state == PM_SUSPEND_ON
+	    && strcmp(buf, "off") && strcmp(buf, "off\n"))
+		return -EINVAL;
+
+	error = pm_autosleep_set_state(state);
+	return error ? error : n;
+}
+
+power_attr(autosleep);
+#endif /* CONFIG_PM_AUTOSLEEP */
+
+#ifdef CONFIG_PM_WAKELOCKS
+static ssize_t wake_lock_show(struct kobject *kobj,
+			      struct kobj_attribute *attr,
+			      char *buf)
+{
+	return pm_show_wakelocks(buf, true);
+}
+
+static ssize_t wake_lock_store(struct kobject *kobj,
+			       struct kobj_attribute *attr,
+			       const char *buf, size_t n)
+{
+	int error = pm_wake_lock(buf);
+	return error ? error : n;
+}
+
+power_attr(wake_lock);
+
+static ssize_t wake_unlock_show(struct kobject *kobj,
+				struct kobj_attribute *attr,
+				char *buf)
+{
+	return pm_show_wakelocks(buf, false);
+}
+
+static ssize_t wake_unlock_store(struct kobject *kobj,
+				 struct kobj_attribute *attr,
+				 const char *buf, size_t n)
+{
+	int error = pm_wake_unlock(buf);
+	return error ? error : n;
+}
+
+power_attr(wake_unlock);
+
+#endif /* CONFIG_PM_WAKELOCKS */
 #endif /* CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_PM_TRACE
@@ -409,6 +521,13 @@ static struct attribute * g[] = {
 #ifdef CONFIG_PM_SLEEP
 	&pm_async_attr.attr,
 	&wakeup_count_attr.attr,
+#ifdef CONFIG_PM_AUTOSLEEP
+	&autosleep_attr.attr,
+#endif
+#ifdef CONFIG_PM_WAKELOCKS
+	&wake_lock_attr.attr,
+	&wake_unlock_attr.attr,
+#endif
 #ifdef CONFIG_PM_DEBUG
 	&pm_test_attr.attr,
 #endif
@@ -444,7 +563,10 @@ static int __init pm_init(void)
 	power_kobj = kobject_create_and_add("power", NULL);
 	if (!power_kobj)
 		return -ENOMEM;
-	return sysfs_create_group(power_kobj, &attr_group);
+	error = sysfs_create_group(power_kobj, &attr_group);
+	if (error)
+		return error;
+	return pm_autosleep_init();
 }
 
 core_initcall(pm_init);
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 98f3622d740..b0bd4beaebf 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -264,3 +264,30 @@ static inline void suspend_thaw_processes(void)
 {
 }
 #endif
+
+#ifdef CONFIG_PM_AUTOSLEEP
+
+/* kernel/power/autosleep.c */
+extern int pm_autosleep_init(void);
+extern int pm_autosleep_lock(void);
+extern void pm_autosleep_unlock(void);
+extern suspend_state_t pm_autosleep_state(void);
+extern int pm_autosleep_set_state(suspend_state_t state);
+
+#else /* !CONFIG_PM_AUTOSLEEP */
+
+static inline int pm_autosleep_init(void) { return 0; }
+static inline int pm_autosleep_lock(void) { return 0; }
+static inline void pm_autosleep_unlock(void) {}
+static inline suspend_state_t pm_autosleep_state(void) { return PM_SUSPEND_ON; }
+
+#endif /* !CONFIG_PM_AUTOSLEEP */
+
+#ifdef CONFIG_PM_WAKELOCKS
+
+/* kernel/power/wakelock.c */
+extern ssize_t pm_show_wakelocks(char *buf, bool show_active);
+extern int pm_wake_lock(const char *buf);
+extern int pm_wake_unlock(const char *buf);
+
+#endif /* !CONFIG_PM_WAKELOCKS */
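The header keeps callers free of #ifdef clutter: when CONFIG_PM_AUTOSLEEP is
off, static inline no-op stubs stand in for the real functions, so pm_init()
and state_store() compile unchanged either way. The same idiom in miniature,
as a standalone sketch with a hypothetical CONFIG_FEATURE_FOO:

#include <stdio.h>

/* #define CONFIG_FEATURE_FOO 1 */

#ifdef CONFIG_FEATURE_FOO
extern int foo_init(void);			/* real implementation elsewhere */
#else
static inline int foo_init(void) { return 0; }	/* compiled-out fallback */
#endif

int main(void)
{
	/* No #ifdef needed at the call site. */
	if (foo_init())
		fprintf(stderr, "foo_init failed\n");
	return 0;
}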
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index eef311a58a6..11e22c068e8 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -6,7 +6,7 @@
  *
  * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
  * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
- * Copyright (C) 2010 Bojan Smojver <bojan@rexursive.com>
+ * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
  *
  * This file is released under the GPLv2.
  *
@@ -282,14 +282,17 @@ static int write_page(void *buf, sector_t offset, struct bio **bio_chain)
 		return -ENOSPC;
 
 	if (bio_chain) {
-		src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
+		src = (void *)__get_free_page(__GFP_WAIT | __GFP_NOWARN |
+		                              __GFP_NORETRY);
 		if (src) {
 			copy_page(src, buf);
 		} else {
 			ret = hib_wait_on_bio_chain(bio_chain); /* Free pages */
 			if (ret)
 				return ret;
-			src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
+			src = (void *)__get_free_page(__GFP_WAIT |
+			                              __GFP_NOWARN |
+			                              __GFP_NORETRY);
 			if (src) {
 				copy_page(src, buf);
 			} else {
@@ -367,12 +370,17 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf,
 		clear_page(handle->cur);
 		handle->cur_swap = offset;
 		handle->k = 0;
-	}
-	if (bio_chain && low_free_pages() <= handle->reqd_free_pages) {
-		error = hib_wait_on_bio_chain(bio_chain);
-		if (error)
-			goto out;
-		handle->reqd_free_pages = reqd_free_pages();
+
+		if (bio_chain && low_free_pages() <= handle->reqd_free_pages) {
+			error = hib_wait_on_bio_chain(bio_chain);
+			if (error)
+				goto out;
+			/*
+			 * Recalculate the number of required free pages, to
+			 * make sure we never take more than half.
+			 */
+			handle->reqd_free_pages = reqd_free_pages();
+		}
 	}
  out:
 	return error;
@@ -419,8 +427,9 @@ static int swap_writer_finish(struct swap_map_handle *handle,
 /* Maximum number of threads for compression/decompression. */
 #define LZO_THREADS	3
 
-/* Maximum number of pages for read buffering. */
-#define LZO_READ_PAGES	(MAP_PAGE_ENTRIES * 8)
+/* Minimum/maximum number of pages for read buffering. */
+#define LZO_MIN_RD_PAGES	1024
+#define LZO_MAX_RD_PAGES	8192
 
 
 /**
@@ -631,12 +640,6 @@ static int save_image_lzo(struct swap_map_handle *handle,
 	}
 
 	/*
-	 * Adjust number of free pages after all allocations have been done.
-	 * We don't want to run out of pages when writing.
-	 */
-	handle->reqd_free_pages = reqd_free_pages();
-
-	/*
 	 * Start the CRC32 thread.
 	 */
 	init_waitqueue_head(&crc->go);
@@ -657,6 +660,12 @@ static int save_image_lzo(struct swap_map_handle *handle,
 			goto out_clean;
 		}
 	}
 
+	/*
+	 * Adjust the number of required free pages after all allocations have
+	 * been done. We don't want to run out of pages when writing.
+	 */
+	handle->reqd_free_pages = reqd_free_pages();
+
 	printk(KERN_INFO
 	       "PM: Using %u thread(s) for compression.\n"
 	       "PM: Compressing and saving image data (%u pages) ... ",
@@ -1067,7 +1076,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
 	unsigned i, thr, run_threads, nr_threads;
 	unsigned ring = 0, pg = 0, ring_size = 0,
 	         have = 0, want, need, asked = 0;
-	unsigned long read_pages;
+	unsigned long read_pages = 0;
 	unsigned char **page = NULL;
 	struct dec_data *data = NULL;
 	struct crc_data *crc = NULL;
@@ -1079,7 +1088,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
 	nr_threads = num_online_cpus() - 1;
 	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
 
-	page = vmalloc(sizeof(*page) * LZO_READ_PAGES);
+	page = vmalloc(sizeof(*page) * LZO_MAX_RD_PAGES);
 	if (!page) {
 		printk(KERN_ERR "PM: Failed to allocate LZO page\n");
 		ret = -ENOMEM;
@@ -1144,15 +1153,22 @@ static int load_image_lzo(struct swap_map_handle *handle,
 	}
 
 	/*
-	 * Adjust number of pages for read buffering, in case we are short.
+	 * Set the number of pages for read buffering.
+	 * This is complete guesswork, because we'll only know the real
+	 * picture once prepare_image() is called, which is much later on
+	 * during the image load phase. We'll assume the worst case and
+	 * say that none of the image pages are from high memory.
 	 */
-	read_pages = (nr_free_pages() - snapshot_get_image_size()) >> 1;
-	read_pages = clamp_val(read_pages, LZO_CMP_PAGES, LZO_READ_PAGES);
+	if (low_free_pages() > snapshot_get_image_size())
+		read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
+	read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);
 
 	for (i = 0; i < read_pages; i++) {
 		page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
 		                                  __GFP_WAIT | __GFP_HIGH :
-		                                  __GFP_WAIT);
+		                                  __GFP_WAIT | __GFP_NOWARN |
+		                                  __GFP_NORETRY);
+
 		if (!page[i]) {
 			if (i < LZO_CMP_PAGES) {
 				ring_size = i;
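Two themes run through the swap.c changes: the read-buffer allocations drop
__GFP_HIGH (which dips into emergency reserves) in favour of __GFP_NOWARN |
__GFP_NORETRY, failing fast and quietly instead of pressuring the allocator,
and the buffer sizing now counts only low memory, takes at most half of the
surplus, and is clamped to a fixed window. A standalone sketch of the new
sizing arithmetic, with made-up memory numbers and 4 KiB pages assumed:

#include <stdio.h>

#define LZO_MIN_RD_PAGES 1024
#define LZO_MAX_RD_PAGES 8192

static unsigned long clamp_val(unsigned long v, unsigned long lo,
			       unsigned long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	unsigned long low_free_pages = 100000;	/* sample: ~390 MiB free  */
	unsigned long image_pages = 60000;	/* sample: ~234 MiB image */
	unsigned long read_pages = 0;

	/* Worst case: assume no image pages come from high memory, and
	   never take more than half of the lowmem surplus. */
	if (low_free_pages > image_pages)
		read_pages = (low_free_pages - image_pages) / 2;
	read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);

	/* Here: (100000 - 60000) / 2 = 20000, clamped to 8192 pages,
	   i.e. 32 MiB of read buffers with 4 KiB pages. */
	printf("read_pages = %lu\n", read_pages);
	return 0;
}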
diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c
new file mode 100644
index 00000000000..c8fba338007
--- /dev/null
+++ b/kernel/power/wakelock.c
@@ -0,0 +1,259 @@
+/*
+ * kernel/power/wakelock.c
+ *
+ * User space wakeup sources support.
+ *
+ * Copyright (C) 2012 Rafael J. Wysocki <rjw@sisk.pl>
+ *
+ * This code is based on the analogous interface allowing user space to
+ * manipulate wakelocks on Android.
+ */
+
+#include <linux/ctype.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/hrtimer.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+
+static DEFINE_MUTEX(wakelocks_lock);
+
+struct wakelock {
+	char			*name;
+	struct rb_node		node;
+	struct wakeup_source	ws;
+#ifdef CONFIG_PM_WAKELOCKS_GC
+	struct list_head	lru;
+#endif
+};
+
+static struct rb_root wakelocks_tree = RB_ROOT;
+
+ssize_t pm_show_wakelocks(char *buf, bool show_active)
+{
+	struct rb_node *node;
+	struct wakelock *wl;
+	char *str = buf;
+	char *end = buf + PAGE_SIZE;
+
+	mutex_lock(&wakelocks_lock);
+
+	for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) {
+		wl = rb_entry(node, struct wakelock, node);
+		if (wl->ws.active == show_active)
+			str += scnprintf(str, end - str, "%s ", wl->name);
+	}
+	if (str > buf)
+		str--;
+
+	str += scnprintf(str, end - str, "\n");
+
+	mutex_unlock(&wakelocks_lock);
+	return (str - buf);
+}
+
+#if CONFIG_PM_WAKELOCKS_LIMIT > 0
+static unsigned int number_of_wakelocks;
+
+static inline bool wakelocks_limit_exceeded(void)
+{
+	return number_of_wakelocks > CONFIG_PM_WAKELOCKS_LIMIT;
+}
+
+static inline void increment_wakelocks_number(void)
+{
+	number_of_wakelocks++;
+}
+
+static inline void decrement_wakelocks_number(void)
+{
+	number_of_wakelocks--;
+}
+#else /* CONFIG_PM_WAKELOCKS_LIMIT = 0 */
+static inline bool wakelocks_limit_exceeded(void) { return false; }
+static inline void increment_wakelocks_number(void) {}
+static inline void decrement_wakelocks_number(void) {}
+#endif /* CONFIG_PM_WAKELOCKS_LIMIT */
+
+#ifdef CONFIG_PM_WAKELOCKS_GC
+#define WL_GC_COUNT_MAX	100
+#define WL_GC_TIME_SEC	300
+
+static LIST_HEAD(wakelocks_lru_list);
+static unsigned int wakelocks_gc_count;
+
+static inline void wakelocks_lru_add(struct wakelock *wl)
+{
+	list_add(&wl->lru, &wakelocks_lru_list);
+}
+
+static inline void wakelocks_lru_most_recent(struct wakelock *wl)
+{
+	list_move(&wl->lru, &wakelocks_lru_list);
+}
+
+static void wakelocks_gc(void)
+{
+	struct wakelock *wl, *aux;
+	ktime_t now;
+
+	if (++wakelocks_gc_count <= WL_GC_COUNT_MAX)
+		return;
+
+	now = ktime_get();
+	list_for_each_entry_safe_reverse(wl, aux, &wakelocks_lru_list, lru) {
+		u64 idle_time_ns;
+		bool active;
+
+		spin_lock_irq(&wl->ws.lock);
+		idle_time_ns = ktime_to_ns(ktime_sub(now, wl->ws.last_time));
+		active = wl->ws.active;
+		spin_unlock_irq(&wl->ws.lock);
+
+		if (idle_time_ns < ((u64)WL_GC_TIME_SEC * NSEC_PER_SEC))
+			break;
+
+		if (!active) {
+			wakeup_source_remove(&wl->ws);
+			rb_erase(&wl->node, &wakelocks_tree);
+			list_del(&wl->lru);
+			kfree(wl->name);
+			kfree(wl);
+			decrement_wakelocks_number();
+		}
+	}
+	wakelocks_gc_count = 0;
+}
+#else /* !CONFIG_PM_WAKELOCKS_GC */
+static inline void wakelocks_lru_add(struct wakelock *wl) {}
+static inline void wakelocks_lru_most_recent(struct wakelock *wl) {}
+static inline void wakelocks_gc(void) {}
+#endif /* !CONFIG_PM_WAKELOCKS_GC */
+
+static struct wakelock *wakelock_lookup_add(const char *name, size_t len,
+					    bool add_if_not_found)
+{
+	struct rb_node **node = &wakelocks_tree.rb_node;
+	struct rb_node *parent = *node;
+	struct wakelock *wl;
+
+	while (*node) {
+		int diff;
+
+		parent = *node;
+		wl = rb_entry(*node, struct wakelock, node);
+		diff = strncmp(name, wl->name, len);
+		if (diff == 0) {
+			if (wl->name[len])
+				diff = -1;
+			else
+				return wl;
+		}
+		if (diff < 0)
+			node = &(*node)->rb_left;
+		else
+			node = &(*node)->rb_right;
+	}
+	if (!add_if_not_found)
+		return ERR_PTR(-EINVAL);
+
+	if (wakelocks_limit_exceeded())
+		return ERR_PTR(-ENOSPC);
+
+	/* Not found, we have to add a new one. */
+	wl = kzalloc(sizeof(*wl), GFP_KERNEL);
+	if (!wl)
+		return ERR_PTR(-ENOMEM);
+
+	wl->name = kstrndup(name, len, GFP_KERNEL);
+	if (!wl->name) {
+		kfree(wl);
+		return ERR_PTR(-ENOMEM);
+	}
+	wl->ws.name = wl->name;
+	wakeup_source_add(&wl->ws);
+	rb_link_node(&wl->node, parent, node);
+	rb_insert_color(&wl->node, &wakelocks_tree);
+	wakelocks_lru_add(wl);
+	increment_wakelocks_number();
+	return wl;
+}
+
+int pm_wake_lock(const char *buf)
+{
+	const char *str = buf;
+	struct wakelock *wl;
+	u64 timeout_ns = 0;
+	size_t len;
+	int ret = 0;
+
+	while (*str && !isspace(*str))
+		str++;
+
+	len = str - buf;
+	if (!len)
+		return -EINVAL;
+
+	if (*str && *str != '\n') {
+		/* Find out if there's a valid timeout string appended. */
+		ret = kstrtou64(skip_spaces(str), 10, &timeout_ns);
+		if (ret)
+			return -EINVAL;
+	}
+
+	mutex_lock(&wakelocks_lock);
+
+	wl = wakelock_lookup_add(buf, len, true);
+	if (IS_ERR(wl)) {
+		ret = PTR_ERR(wl);
+		goto out;
+	}
+	if (timeout_ns) {
+		u64 timeout_ms = timeout_ns + NSEC_PER_MSEC - 1;
+
+		do_div(timeout_ms, NSEC_PER_MSEC);
+		__pm_wakeup_event(&wl->ws, timeout_ms);
+	} else {
+		__pm_stay_awake(&wl->ws);
+	}
+
+	wakelocks_lru_most_recent(wl);
+
+ out:
+	mutex_unlock(&wakelocks_lock);
+	return ret;
+}
+
+int pm_wake_unlock(const char *buf)
+{
+	struct wakelock *wl;
+	size_t len;
+	int ret = 0;
+
+	len = strlen(buf);
+	if (!len)
+		return -EINVAL;
+
+	if (buf[len-1] == '\n')
+		len--;
+
+	if (!len)
+		return -EINVAL;
+
+	mutex_lock(&wakelocks_lock);
+
+	wl = wakelock_lookup_add(buf, len, false);
+	if (IS_ERR(wl)) {
+		ret = PTR_ERR(wl);
+		goto out;
+	}
+	__pm_relax(&wl->ws);
+
+	wakelocks_lru_most_recent(wl);
+	wakelocks_gc();
+
+ out:
+	mutex_unlock(&wakelocks_lock);
+	return ret;
+}
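pm_wake_lock() above parses its input as a lock name optionally followed by
a timeout in nanoseconds; pm_wake_unlock() takes just the name. A user-space
re-implementation of that parse for illustration only (it mirrors the kernel
logic but is not kernel code, and uses strtoull in place of kstrtou64):

#include <ctype.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_wake_lock(const char *buf, char *name, size_t name_sz,
			   unsigned long long *timeout_ns)
{
	const char *str = buf;
	size_t len;

	/* The lock name runs up to the first whitespace character. */
	while (*str && !isspace((unsigned char)*str))
		str++;
	len = str - buf;
	if (!len || len >= name_sz)
		return -EINVAL;
	memcpy(name, buf, len);
	name[len] = '\0';

	*timeout_ns = 0;
	if (*str && *str != '\n') {
		/* An optional decimal timeout in nanoseconds follows. */
		char *end;

		while (isspace((unsigned char)*str))
			str++;
		errno = 0;
		*timeout_ns = strtoull(str, &end, 10);
		if (errno || end == str)
			return -EINVAL;
	}
	return 0;
}

int main(void)
{
	char name[32];
	unsigned long long t;

	if (!parse_wake_lock("example 5000000000\n", name, sizeof(name), &t))
		printf("name=%s timeout=%llu ns\n", name, t);	/* 5 s */
	return 0;
}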