author     Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-17 16:15:55 -0500
committer  Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-17 16:15:55 -0500
commit     8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree       a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /kernel/power
parent     406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'kernel/power')
 -rw-r--r--  kernel/power/Kconfig     | 115
 -rw-r--r--  kernel/power/Makefile    |  11
 -rw-r--r--  kernel/power/autosleep.c | 127
 -rw-r--r--  kernel/power/console.c   |   4
 -rw-r--r--  kernel/power/hibernate.c | 269
 -rw-r--r--  kernel/power/main.c      | 333
 -rw-r--r--  kernel/power/power.h     |  73
 -rw-r--r--  kernel/power/poweroff.c  |   2
 -rw-r--r--  kernel/power/process.c   | 181
 -rw-r--r--  kernel/power/qos.c       | 602
 -rw-r--r--  kernel/power/snapshot.c  |  62
 -rw-r--r--  kernel/power/suspend.c   |  98
 -rw-r--r--  kernel/power/swap.c      | 938
 -rw-r--r--  kernel/power/user.c      | 194
 -rw-r--r--  kernel/power/wakelock.c  | 764
15 files changed, 1298 insertions, 2475 deletions
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 5dfdc9ea180..fcf5a834c4e 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -18,6 +18,73 @@ config SUSPEND_FREEZER
 
 	  Turning OFF this setting is NOT recommended! If in doubt, say Y.
 
+config HAS_WAKELOCK
+	bool
+
+config HAS_EARLYSUSPEND
+	bool
+
+config WAKELOCK
+	bool "Wake lock"
+	depends on PM && RTC_CLASS
+	default n
+	select HAS_WAKELOCK
+	---help---
+	  Enable wakelocks. When user space request a sleep state the
+	  sleep request will be delayed until no wake locks are held.
+
+config WAKELOCK_STAT
+	bool "Wake lock stats"
+	depends on WAKELOCK
+	default y
+	---help---
+	  Report wake lock stats in /proc/wakelocks
+
+config USER_WAKELOCK
+	bool "Userspace wake locks"
+	depends on WAKELOCK
+	default y
+	---help---
+	  User-space wake lock api. Write "lockname" or "lockname timeout"
+	  to /sys/power/wake_lock lock and if needed create a wake lock.
+	  Write "lockname" to /sys/power/wake_unlock to unlock a user wake
+	  lock.
+
+config EARLYSUSPEND
+	bool "Early suspend"
+	depends on WAKELOCK
+	default y
+	select HAS_EARLYSUSPEND
+	---help---
+	  Call early suspend handlers when the user requested sleep state
+	  changes.
+
+choice
+	prompt "User-space screen access"
+	default FB_EARLYSUSPEND if !FRAMEBUFFER_CONSOLE
+	default CONSOLE_EARLYSUSPEND
+	depends on HAS_EARLYSUSPEND
+
+config NO_USER_SPACE_SCREEN_ACCESS_CONTROL
+	bool "None"
+
+config CONSOLE_EARLYSUSPEND
+	bool "Console switch on early-suspend"
+	depends on HAS_EARLYSUSPEND && VT
+	---help---
+	  Register early suspend handler to perform a console switch to
+	  when user-space should stop drawing to the screen and a switch
+	  back when it should resume.
+
+config FB_EARLYSUSPEND
+	bool "Sysfs interface"
+	depends on HAS_EARLYSUSPEND
+	---help---
+	  Register early suspend handler that notifies and waits for
+	  user-space through sysfs when user-space should stop drawing
+	  to the screen and notifies user-space when it should resume.
+endchoice
+
 config HIBERNATE_CALLBACKS
 	bool
 
@@ -27,7 +94,6 @@ config HIBERNATION
 	select HIBERNATE_CALLBACKS
 	select LZO_COMPRESS
 	select LZO_DECOMPRESS
-	select CRC32
 	---help---
 	  Enable the suspend to disk (STD) functionality, which is usually
 	  called "hibernation" in user interfaces. STD checkpoints the
@@ -66,9 +132,6 @@ config HIBERNATION
 
 	  For more information take a look at <file:Documentation/power/swsusp.txt>.
 
-config ARCH_SAVE_PAGE_KEYS
-	bool
-
 config PM_STD_PARTITION
 	string "Default resume partition"
 	depends on HIBERNATION
@@ -103,33 +166,6 @@ config PM_SLEEP_SMP
 	select HOTPLUG
 	select HOTPLUG_CPU
 
-config PM_AUTOSLEEP
-	bool "Opportunistic sleep"
-	depends on PM_SLEEP
-	default n
-	---help---
-	  Allow the kernel to trigger a system transition into a global sleep
-	  state automatically whenever there are no active wakeup sources.
-
-config PM_WAKELOCKS
-	bool "User space wakeup sources interface"
-	depends on PM_SLEEP
-	default n
-	---help---
-	  Allow user space to create, activate and deactivate wakeup source
-	  objects with the help of a sysfs-based interface.
-
-config PM_WAKELOCKS_LIMIT
-	int "Maximum number of user space wakeup sources (0 = no limit)"
-	range 0 100000
-	default 100
-	depends on PM_WAKELOCKS
-
-config PM_WAKELOCKS_GC
-	bool "Garbage collector for user space wakeup sources"
-	depends on PM_WAKELOCKS
-	default y
-
 config PM_RUNTIME
 	bool "Run-time PM core functionality"
 	depends on !IA64_HP_SIM
@@ -175,7 +211,7 @@ config PM_TEST_SUSPEND
 	  You probably want to have your system's RTC driver statically
 	  linked, ensuring that it's available when this test runs.
 
-config PM_SLEEP_DEBUG
+config CAN_PM_TRACE
 	def_bool y
 	depends on PM_DEBUG && PM_SLEEP
 
@@ -196,7 +232,7 @@
 
 config PM_TRACE_RTC
 	bool "Suspend/resume event tracing"
-	depends on PM_SLEEP_DEBUG
+	depends on CAN_PM_TRACE
 	depends on X86
 	select PM_TRACE
 	---help---
@@ -263,14 +299,13 @@
 	bool
 	depends on PM
 
-config PM_GENERIC_DOMAINS_SLEEP
-	def_bool y
-	depends on PM_SLEEP && PM_GENERIC_DOMAINS
-
 config PM_GENERIC_DOMAINS_RUNTIME
 	def_bool y
 	depends on PM_RUNTIME && PM_GENERIC_DOMAINS
 
-config CPU_PM
-	bool
-	depends on SUSPEND || CPU_IDLE
+config SUSPEND_TIME
+	bool "Log time spent in suspend"
+	---help---
+	  Prints the time spent in suspend in the kernel log, and
+	  keeps statistics on the time spent in suspend in
+	  /sys/kernel/debug/suspend_time
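
The USER_WAKELOCK help text above defines the whole user-space contract: writing "lockname" (optionally followed by a timeout) to /sys/power/wake_lock takes, and if needed creates, a wake lock that delays any requested sleep state, and writing the name to /sys/power/wake_unlock releases it. A minimal user-space sketch of that flow follows; the lock name and the sysfs_write() helper are illustrative, not part of the patch.

/*
 * Minimal sketch of the user-space wake lock API described by the
 * USER_WAKELOCK help text above. The lock name "my_app_lock" and the
 * sysfs_write() helper are illustrative only.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int sysfs_write(const char *path, const char *value)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, value, strlen(value));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	/* Take (and, on first use, create) the wake lock: sleep is now held off. */
	if (sysfs_write("/sys/power/wake_lock", "my_app_lock"))
		return 1;

	/* ... work that must not be interrupted by suspend ... */

	/* Release it so a pending sleep request may proceed. */
	return sysfs_write("/sys/power/wake_unlock", "my_app_lock") ? 1 : 0;
}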
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index 29472bff11e..9b224e16b19 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -1,15 +1,18 @@
 
 ccflags-$(CONFIG_PM_DEBUG) := -DDEBUG
 
-obj-y += qos.o
 obj-$(CONFIG_PM) += main.o
-obj-$(CONFIG_VT_CONSOLE_SLEEP) += console.o
+obj-$(CONFIG_PM_SLEEP) += console.o
 obj-$(CONFIG_FREEZER) += process.o
 obj-$(CONFIG_SUSPEND) += suspend.o
 obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o
 obj-$(CONFIG_HIBERNATION) += hibernate.o snapshot.o swap.o user.o \
 				block_io.o
-obj-$(CONFIG_PM_AUTOSLEEP) += autosleep.o
-obj-$(CONFIG_PM_WAKELOCKS) += wakelock.o
+obj-$(CONFIG_WAKELOCK) += wakelock.o
+obj-$(CONFIG_USER_WAKELOCK) += userwakelock.o
+obj-$(CONFIG_EARLYSUSPEND) += earlysuspend.o
+obj-$(CONFIG_CONSOLE_EARLYSUSPEND) += consoleearlysuspend.o
+obj-$(CONFIG_FB_EARLYSUSPEND) += fbearlysuspend.o
+obj-$(CONFIG_SUSPEND_TIME) += suspend_time.o
 
 obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o
diff --git a/kernel/power/autosleep.c b/kernel/power/autosleep.c
deleted file mode 100644
index ca304046d9e..00000000000
--- a/kernel/power/autosleep.c
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * kernel/power/autosleep.c
- *
- * Opportunistic sleep support.
- *
- * Copyright (C) 2012 Rafael J. Wysocki <rjw@sisk.pl>
- */
-
-#include <linux/device.h>
-#include <linux/mutex.h>
-#include <linux/pm_wakeup.h>
-
-#include "power.h"
-
-static suspend_state_t autosleep_state;
-static struct workqueue_struct *autosleep_wq;
-/*
- * Note: it is only safe to mutex_lock(&autosleep_lock) if a wakeup_source
- * is active, otherwise a deadlock with try_to_suspend() is possible.
- * Alternatively mutex_lock_interruptible() can be used. This will then fail
- * if an auto_sleep cycle tries to freeze processes.
- */
-static DEFINE_MUTEX(autosleep_lock);
-static struct wakeup_source *autosleep_ws;
-
-static void try_to_suspend(struct work_struct *work)
-{
-	unsigned int initial_count, final_count;
-
-	if (!pm_get_wakeup_count(&initial_count, true))
-		goto out;
-
-	mutex_lock(&autosleep_lock);
-
-	if (!pm_save_wakeup_count(initial_count)) {
-		mutex_unlock(&autosleep_lock);
-		goto out;
-	}
-
-	if (autosleep_state == PM_SUSPEND_ON) {
-		mutex_unlock(&autosleep_lock);
-		return;
-	}
-	if (autosleep_state >= PM_SUSPEND_MAX)
-		hibernate();
-	else
-		pm_suspend(autosleep_state);
-
-	mutex_unlock(&autosleep_lock);
-
-	if (!pm_get_wakeup_count(&final_count, false))
-		goto out;
-
-	/*
-	 * If the wakeup occured for an unknown reason, wait to prevent the
-	 * system from trying to suspend and waking up in a tight loop.
-	 */
-	if (final_count == initial_count)
-		schedule_timeout_uninterruptible(HZ / 2);
-
-out:
-	queue_up_suspend_work();
-}
-
-static DECLARE_WORK(suspend_work, try_to_suspend);
-
-void queue_up_suspend_work(void)
-{
-	if (!work_pending(&suspend_work) && autosleep_state > PM_SUSPEND_ON)
-		queue_work(autosleep_wq, &suspend_work);
-}
-
-suspend_state_t pm_autosleep_state(void)
-{
-	return autosleep_state;
-}
-
-int pm_autosleep_lock(void)
-{
-	return mutex_lock_interruptible(&autosleep_lock);
-}
-
-void pm_autosleep_unlock(void)
-{
-	mutex_unlock(&autosleep_lock);
-}
-
-int pm_autosleep_set_state(suspend_state_t state)
-{
-
-#ifndef CONFIG_HIBERNATION
-	if (state >= PM_SUSPEND_MAX)
-		return -EINVAL;
-#endif
-
-	__pm_stay_awake(autosleep_ws);
-
-	mutex_lock(&autosleep_lock);
-
-	autosleep_state = state;
-
-	__pm_relax(autosleep_ws);
-
-	if (state > PM_SUSPEND_ON) {
-		pm_wakep_autosleep_enabled(true);
-		queue_up_suspend_work();
-	} else {
-		pm_wakep_autosleep_enabled(false);
-	}
-
-	mutex_unlock(&autosleep_lock);
-	return 0;
-}
-
-int __init pm_autosleep_init(void)
-{
-	autosleep_ws = wakeup_source_register("autosleep");
-	if (!autosleep_ws)
-		return -ENOMEM;
-
-	autosleep_wq = alloc_ordered_workqueue("autosleep", 0);
-	if (autosleep_wq)
-		return 0;
-
-	wakeup_source_unregister(autosleep_ws);
-	return -ENOMEM;
-}
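
The deleted autosleep.c is the in-kernel consumer of the wakeup-count handshake: pm_get_wakeup_count() snapshots the number of wakeup events (blocking while wakeup sources are active), pm_save_wakeup_count() aborts the attempt if new events arrived in the meantime, and only then does try_to_suspend() call pm_suspend() or hibernate(). User space can run the same loop through /sys/power/wakeup_count and /sys/power/state; a hedged sketch of that user-space side follows, assuming the documented sysfs semantics (the retry interval and error handling are illustrative).

/*
 * Sketch of the user-space half of the wakeup_count handshake that
 * autosleep performs in-kernel above: read the current count, hand it
 * back to the kernel, and only then request suspend. The write-back
 * fails if a wakeup event arrived in between, in which case we retry.
 */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char count[32];
	ssize_t n;

	for (;;) {
		int fd = open("/sys/power/wakeup_count", O_RDWR);

		if (fd < 0)
			return 1;
		/* May block until wakeup sources go idle, then reports the count. */
		n = read(fd, count, sizeof(count) - 1);
		/* Writing the count back fails if it changed meanwhile. */
		if (n > 0 && write(fd, count, n) == n) {
			int sfd = open("/sys/power/state", O_WRONLY);

			if (sfd >= 0) {
				write(sfd, "mem", 3);	/* returns after resume */
				close(sfd);
			}
		}
		close(fd);
		sleep(1);	/* crude back-off between attempts */
	}
}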
diff --git a/kernel/power/console.c b/kernel/power/console.c
index b1dc456474b..218e5af9015 100644
--- a/kernel/power/console.c
+++ b/kernel/power/console.c
@@ -1,5 +1,5 @@
 /*
- * Functions for saving/restoring console.
+ * drivers/power/process.c - Functions for saving/restoring console.
  *
  * Originally from swsusp.
  */
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include "power.h"
 
+#if defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
 #define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1)
 
 static int orig_fgconsole, orig_kmsg;
@@ -31,3 +32,4 @@ void pm_restore_console(void)
 		vt_kmsg_redirect(orig_kmsg);
 	}
 }
+#endif
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index b26f5f1e773..8f7b1db1ece 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -5,18 +5,16 @@
  * Copyright (c) 2003 Open Source Development Lab
  * Copyright (c) 2004 Pavel Machek <pavel@ucw.cz>
  * Copyright (c) 2009 Rafael J. Wysocki, Novell Inc.
- * Copyright (C) 2012 Bojan Smojver <bojan@rexursive.com>
  *
  * This file is released under the GPLv2.
  */
 
-#include <linux/export.h>
 #include <linux/suspend.h>
 #include <linux/syscalls.h>
 #include <linux/reboot.h>
 #include <linux/string.h>
 #include <linux/device.h>
-#include <linux/async.h>
+#include <linux/kmod.h>
 #include <linux/delay.h>
 #include <linux/fs.h>
 #include <linux/mount.h>
@@ -26,29 +24,25 @@
 #include <linux/freezer.h>
 #include <linux/gfp.h>
 #include <linux/syscore_ops.h>
-#include <linux/ctype.h>
-#include <linux/genhd.h>
+#include <scsi/scsi_scan.h>
 
 #include "power.h"
 
 
-static int nocompress;
-static int noresume;
-static int resume_wait;
-static int resume_delay;
+static int nocompress = 0;
+static int noresume = 0;
 static char resume_file[256] = CONFIG_PM_STD_PARTITION;
 dev_t swsusp_resume_device;
 sector_t swsusp_resume_block;
-int in_suspend __nosavedata;
+int in_suspend __nosavedata = 0;
 
 enum {
 	HIBERNATION_INVALID,
 	HIBERNATION_PLATFORM,
+	HIBERNATION_TEST,
+	HIBERNATION_TESTPROC,
 	HIBERNATION_SHUTDOWN,
 	HIBERNATION_REBOOT,
-#ifdef CONFIG_SUSPEND
-	HIBERNATION_SUSPEND,
-#endif
 	/* keep last */
 	__HIBERNATION_AFTER_LAST
 };
@@ -57,8 +51,6 @@ enum {
 
 static int hibernation_mode = HIBERNATION_SHUTDOWN;
 
-bool freezer_test_done;
-
 static const struct platform_hibernation_ops *hibernation_ops;
 
 /**
@@ -73,14 +65,14 @@ void hibernation_set_ops(const struct platform_hibernation_ops *ops)
 		WARN_ON(1);
 		return;
 	}
-	lock_system_sleep();
+	mutex_lock(&pm_mutex);
 	hibernation_ops = ops;
 	if (ops)
 		hibernation_mode = HIBERNATION_PLATFORM;
 	else if (hibernation_mode == HIBERNATION_PLATFORM)
 		hibernation_mode = HIBERNATION_SHUTDOWN;
 
-	unlock_system_sleep();
+	mutex_unlock(&pm_mutex);
 }
 
 static bool entering_platform_hibernation;
@@ -98,6 +90,15 @@ static void hibernation_debug_sleep(void)
 	mdelay(5000);
 }
 
+static int hibernation_testmode(int mode)
+{
+	if (hibernation_mode == mode) {
+		hibernation_debug_sleep();
+		return 1;
+	}
+	return 0;
+}
+
 static int hibernation_test(int level)
 {
 	if (pm_test_level == level) {
@@ -107,6 +108,7 @@ static int hibernation_test(int level)
 	return 0;
 }
 #else /* !CONFIG_PM_DEBUG */
+static int hibernation_testmode(int mode) { return 0; }
 static int hibernation_test(int level) { return 0; }
 #endif /* !CONFIG_PM_DEBUG */
 
@@ -249,8 +251,8 @@ void swsusp_show_speed(struct timeval *start, struct timeval *stop,
  * create_image - Create a hibernation image.
  * @platform_mode: Whether or not to use the platform driver.
  *
- * Execute device drivers' "late" and "noirq" freeze callbacks, create a
- * hibernation image and run the drivers' "noirq" and "early" thaw callbacks.
+ * Execute device drivers' .freeze_noirq() callbacks, create a hibernation image
+ * and execute the drivers' .thaw_noirq() callbacks.
  *
  * Control reappears in this routine after the subsequent restore.
 */
@@ -258,7 +260,7 @@ static int create_image(int platform_mode)
 {
 	int error;
 
-	error = dpm_suspend_end(PMSG_FREEZE);
+	error = dpm_suspend_noirq(PMSG_FREEZE);
 	if (error) {
 		printk(KERN_ERR "PM: Some devices failed to power down, "
 			"aborting hibernation\n");
@@ -270,7 +272,8 @@
 		goto Platform_finish;
 
 	error = disable_nonboot_cpus();
-	if (error || hibernation_test(TEST_CPUS))
+	if (error || hibernation_test(TEST_CPUS)
+	    || hibernation_testmode(HIBERNATION_TEST))
 		goto Enable_cpus;
 
 	local_irq_disable();
@@ -310,7 +313,7 @@
 Platform_finish:
 	platform_finish(platform_mode);
 
-	dpm_resume_start(in_suspend ?
+	dpm_resume_noirq(in_suspend ?
 		(error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
 
 	return error;
@@ -324,55 +327,38 @@
 */
 int hibernation_snapshot(int platform_mode)
 {
-	pm_message_t msg;
+	pm_message_t msg = PMSG_RECOVER;
 	int error;
 
 	error = platform_begin(platform_mode);
 	if (error)
 		goto Close;
 
-	/* Preallocate image memory before shutting down devices. */
-	error = hibernate_preallocate_memory();
+	error = dpm_prepare(PMSG_FREEZE);
 	if (error)
-		goto Close;
+		goto Complete_devices;
 
-	error = freeze_kernel_threads();
+	/* Preallocate image memory before shutting down devices. */
+	error = hibernate_preallocate_memory();
 	if (error)
-		goto Cleanup;
-
-	if (hibernation_test(TEST_FREEZER)) {
-
-		/*
-		 * Indicate to the caller that we are returning due to a
-		 * successful freezer test.
-		 */
-		freezer_test_done = true;
-		goto Thaw;
-	}
-
-	error = dpm_prepare(PMSG_FREEZE);
-	if (error) {
-		dpm_complete(PMSG_RECOVER);
-		goto Thaw;
-	}
+		goto Complete_devices;
 
 	suspend_console();
-	ftrace_stop();
 	pm_restrict_gfp_mask();
-
 	error = dpm_suspend(PMSG_FREEZE);
+	if (error)
+		goto Recover_platform;
 
-	if (error || hibernation_test(TEST_DEVICES))
-		platform_recover(platform_mode);
-	else
-		error = create_image(platform_mode);
+	if (hibernation_test(TEST_DEVICES))
+		goto Recover_platform;
 
+	error = create_image(platform_mode);
 	/*
-	 * In the case that we call create_image() above, the control
-	 * returns here (1) after the image has been created or the
+	 * Control returns here (1) after the image has been created or the
	 * image creation has failed and (2) after a successful restore.
 	 */
 
+Resume_devices:
 	/* We may need to release the preallocated image pages here. */
 	if (error || !in_suspend)
 		swsusp_free();
@@ -383,35 +369,34 @@ int hibernation_snapshot(int platform_mode)
 	if (error || !in_suspend)
 		pm_restore_gfp_mask();
 
-	ftrace_start();
 	resume_console();
+
+Complete_devices:
 	dpm_complete(msg);
 
 Close:
 	platform_end(platform_mode);
 	return error;
 
-Thaw:
-	thaw_kernel_threads();
-Cleanup:
-	swsusp_free();
-	goto Close;
+Recover_platform:
+	platform_recover(platform_mode);
+	goto Resume_devices;
 }
 
 /**
  * resume_target_kernel - Restore system state from a hibernation image.
  * @platform_mode: Whether or not to use the platform driver.
 *
- * Execute device drivers' "noirq" and "late" freeze callbacks, restore the
- * contents of highmem that have not been restored yet from the image and run
- * the low-level code that will restore the remaining contents of memory and
- * switch to the just restored target kernel.
+ * Execute device drivers' .freeze_noirq() callbacks, restore the contents of
+ * highmem that have not been restored yet from the image and run the low-level
+ * code that will restore the remaining contents of memory and switch to the
+ * just restored target kernel.
 */
 static int resume_target_kernel(bool platform_mode)
 {
 	int error;
 
-	error = dpm_suspend_end(PMSG_QUIESCE);
+	error = dpm_suspend_noirq(PMSG_QUIESCE);
 	if (error) {
 		printk(KERN_ERR "PM: Some devices failed to power down, "
 			"aborting resume\n");
@@ -468,7 +453,7 @@ static int resume_target_kernel(bool platform_mode)
 Cleanup:
 	platform_restore_cleanup(platform_mode);
 
-	dpm_resume_start(PMSG_RECOVER);
+	dpm_resume_noirq(PMSG_RECOVER);
 
 	return error;
 }
@@ -478,7 +463,7 @@
 * @platform_mode: If set, use platform driver to prepare for the transition.
 *
 * This routine must be called with pm_mutex held. If it is successful, control
- * reappears in the restored target kernel in hibernation_snapshot().
+ * reappears in the restored target kernel in hibernation_snaphot().
 */
 int hibernation_restore(int platform_mode)
 {
@@ -486,7 +471,6 @@ int hibernation_restore(int platform_mode)
 
 	pm_prepare_console();
 	suspend_console();
-	ftrace_stop();
 	pm_restrict_gfp_mask();
 	error = dpm_suspend_start(PMSG_QUIESCE);
 	if (!error) {
@@ -494,7 +478,6 @@
 		dpm_resume_end(PMSG_RECOVER);
 	}
 	pm_restore_gfp_mask();
-	ftrace_start();
 	resume_console();
 	pm_restore_console();
 	return error;
@@ -521,7 +504,6 @@ int hibernation_platform_enter(void)
 
 	entering_platform_hibernation = true;
 	suspend_console();
-	ftrace_stop();
 	error = dpm_suspend_start(PMSG_HIBERNATE);
 	if (error) {
 		if (hibernation_ops->recover)
@@ -529,7 +511,7 @@
 		goto Resume_devices;
 	}
 
-	error = dpm_suspend_end(PMSG_HIBERNATE);
+	error = dpm_suspend_noirq(PMSG_HIBERNATE);
 	if (error)
 		goto Resume_devices;
 
@@ -560,12 +542,11 @@
 Platform_finish:
 	hibernation_ops->finish();
 
-	dpm_resume_start(PMSG_RESTORE);
+	dpm_resume_noirq(PMSG_RESTORE);
 
 Resume_devices:
 	entering_platform_hibernation = false;
 	dpm_resume_end(PMSG_RESTORE);
-	ftrace_start();
 	resume_console();
 
 Close:
@@ -583,11 +564,10 @@ int hibernation_platform_enter(void)
 */
 static void power_down(void)
 {
-#ifdef CONFIG_SUSPEND
-	int error;
-#endif
-
 	switch (hibernation_mode) {
+	case HIBERNATION_TEST:
+	case HIBERNATION_TESTPROC:
+		break;
 	case HIBERNATION_REBOOT:
 		kernel_restart(NULL);
 		break;
@@ -596,25 +576,6 @@ static void power_down(void)
 	case HIBERNATION_SHUTDOWN:
 		kernel_power_off();
 		break;
-#ifdef CONFIG_SUSPEND
-	case HIBERNATION_SUSPEND:
-		error = suspend_devices_and_enter(PM_SUSPEND_MEM);
-		if (error) {
-			if (hibernation_ops)
-				hibernation_mode = HIBERNATION_PLATFORM;
-			else
-				hibernation_mode = HIBERNATION_SHUTDOWN;
-			power_down();
-		}
-		/*
-		 * Restore swap signature.
-		 */
-		error = swsusp_unmark();
-		if (error)
-			printk(KERN_ERR "PM: Swap will be unusable! "
-				"Try swapon -a.\n");
-		return;
-#endif
 	}
 	kernel_halt();
 	/*
@@ -625,6 +586,17 @@ static void power_down(void)
 	while(1);
 }
 
+static int prepare_processes(void)
+{
+	int error = 0;
+
+	if (freeze_processes()) {
+		error = -EBUSY;
+		thaw_processes();
+	}
+	return error;
+}
+
 /**
 * hibernate - Carry out system hibernation, including saving the image.
 */
@@ -632,7 +604,7 @@ int hibernate(void)
 {
 	int error;
 
-	lock_system_sleep();
+	mutex_lock(&pm_mutex);
 	/* The snapshot device should not be opened while we're running */
 	if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
 		error = -EBUSY;
@@ -644,6 +616,10 @@ int hibernate(void)
 	if (error)
 		goto Exit;
 
+	error = usermodehelper_disable();
+	if (error)
+		goto Exit;
+
 	/* Allocate memory management structures */
 	error = create_basic_memory_bitmaps();
 	if (error)
@@ -653,12 +629,18 @@ int hibernate(void)
 	sys_sync();
 	printk("done.\n");
 
-	error = freeze_processes();
+	error = prepare_processes();
 	if (error)
-		goto Free_bitmaps;
+		goto Finish;
+
+	if (hibernation_test(TEST_FREEZER))
+		goto Thaw;
+
+	if (hibernation_testmode(HIBERNATION_TESTPROC))
+		goto Thaw;
 
 	error = hibernation_snapshot(hibernation_mode == HIBERNATION_PLATFORM);
-	if (error || freezer_test_done)
+	if (error)
 		goto Thaw;
 
 	if (in_suspend) {
@@ -668,9 +650,6 @@ int hibernate(void)
 			flags |= SF_PLATFORM_MODE;
 		if (nocompress)
 			flags |= SF_NOCOMPRESS_MODE;
-		else
-			flags |= SF_CRC32_MODE;
-
 		pr_debug("PM: writing image.\n");
 		error = swsusp_write(flags);
 		swsusp_free();
@@ -684,18 +663,15 @@ int hibernate(void)
 
 Thaw:
 	thaw_processes();
-
-	/* Don't bother checking whether freezer_test_done is true */
-	freezer_test_done = false;
-
-Free_bitmaps:
+Finish:
 	free_basic_memory_bitmaps();
+	usermodehelper_enable();
 Exit:
 	pm_notifier_call_chain(PM_POST_HIBERNATION);
 	pm_restore_console();
 	atomic_inc(&snapshot_device_available);
 Unlock:
-	unlock_system_sleep();
+	mutex_unlock(&pm_mutex);
 	return error;
 }
 
@@ -748,37 +724,20 @@ static int software_resume(void)
 
 	pr_debug("PM: Checking hibernation image partition %s\n", resume_file);
 
-	if (resume_delay) {
-		printk(KERN_INFO "Waiting %dsec before reading resume device...\n",
-			resume_delay);
-		ssleep(resume_delay);
-	}
-
 	/* Check if the device is there */
 	swsusp_resume_device = name_to_dev_t(resume_file);
-
-	/*
-	 * name_to_dev_t is ineffective to verify parition if resume_file is in
-	 * integer format. (e.g. major:minor)
-	 */
-	if (isdigit(resume_file[0]) && resume_wait) {
-		int partno;
-		while (!get_gendisk(swsusp_resume_device, &partno))
-			msleep(10);
-	}
-
 	if (!swsusp_resume_device) {
 		/*
 		 * Some device discovery might still be in progress; we need
 		 * to wait for this to finish.
 		 */
 		wait_for_device_probe();
-
-		if (resume_wait) {
-			while ((swsusp_resume_device = name_to_dev_t(resume_file)) == 0)
-				msleep(10);
-			async_synchronize_full();
-		}
+		/*
+		 * We can't depend on SCSI devices being available after loading
+		 * one of their modules until scsi_complete_async_scans() is
+		 * called and the resume device usually is a SCSI one.
+		 */
+		scsi_complete_async_scans();
 
 		swsusp_resume_device = name_to_dev_t(resume_file);
 		if (!swsusp_resume_device) {
@@ -808,12 +767,16 @@ static int software_resume(void)
 	if (error)
 		goto close_finish;
 
+	error = usermodehelper_disable();
+	if (error)
+		goto close_finish;
+
 	error = create_basic_memory_bitmaps();
 	if (error)
 		goto close_finish;
 
 	pr_debug("PM: Preparing processes for restore.\n");
-	error = freeze_processes();
+	error = prepare_processes();
 	if (error) {
 		swsusp_close(FMODE_READ);
 		goto Done;
@@ -831,6 +794,7 @@ static int software_resume(void)
 	thaw_processes();
 Done:
 	free_basic_memory_bitmaps();
+	usermodehelper_enable();
 Finish:
 	pm_notifier_call_chain(PM_POST_RESTORE);
 	pm_restore_console();
@@ -852,9 +816,8 @@ static const char * const hibernation_modes[] = {
 	[HIBERNATION_PLATFORM] = "platform",
 	[HIBERNATION_SHUTDOWN] = "shutdown",
 	[HIBERNATION_REBOOT] = "reboot",
-#ifdef CONFIG_SUSPEND
-	[HIBERNATION_SUSPEND] = "suspend",
-#endif
+	[HIBERNATION_TEST] = "test",
+	[HIBERNATION_TESTPROC] = "testproc",
 };
 
 /*
@@ -863,15 +826,17 @@ static const char * const hibernation_modes[] = {
 * Hibernation can be handled in several ways. There are a few different ways
 * to put the system into the sleep state: using the platform driver (e.g. ACPI
 * or other hibernation_ops), powering it off or rebooting it (for testing
- * mostly).
+ * mostly), or using one of the two available test modes.
 *
 * The sysfs file /sys/power/disk provides an interface for selecting the
 * hibernation mode to use. Reading from this file causes the available modes
- * to be printed. There are 3 modes that can be supported:
+ * to be printed. There are 5 modes that can be supported:
 *
 * 'platform'
 * 'shutdown'
 * 'reboot'
+ * 'test'
+ * 'testproc'
 *
 * If a platform hibernation driver is in use, 'platform' will be supported
 * and will be used by default. Otherwise, 'shutdown' will be used by default.
@@ -895,9 +860,8 @@ static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr,
 		switch (i) {
 		case HIBERNATION_SHUTDOWN:
 		case HIBERNATION_REBOOT:
-#ifdef CONFIG_SUSPEND
-		case HIBERNATION_SUSPEND:
-#endif
+		case HIBERNATION_TEST:
+		case HIBERNATION_TESTPROC:
 			break;
 		case HIBERNATION_PLATFORM:
 			if (hibernation_ops)
@@ -926,7 +890,7 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
 	p = memchr(buf, '\n', n);
 	len = p ? p - buf : n;
 
-	lock_system_sleep();
+	mutex_lock(&pm_mutex);
 	for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) {
 		if (len == strlen(hibernation_modes[i])
 		    && !strncmp(buf, hibernation_modes[i], len)) {
@@ -938,9 +902,8 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
 	switch (mode) {
 	case HIBERNATION_SHUTDOWN:
 	case HIBERNATION_REBOOT:
-#ifdef CONFIG_SUSPEND
-	case HIBERNATION_SUSPEND:
-#endif
+	case HIBERNATION_TEST:
+	case HIBERNATION_TESTPROC:
 		hibernation_mode = mode;
 		break;
 	case HIBERNATION_PLATFORM:
@@ -955,7 +918,7 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
 	if (!error)
 		pr_debug("PM: Hibernation mode set to '%s'\n",
 			 hibernation_modes[mode]);
-	unlock_system_sleep();
+	mutex_unlock(&pm_mutex);
 	return error ? error : n;
 }
 
@@ -982,9 +945,9 @@ static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr,
 	if (maj != MAJOR(res) || min != MINOR(res))
 		goto out;
 
-	lock_system_sleep();
+	mutex_lock(&pm_mutex);
 	swsusp_resume_device = res;
-	unlock_system_sleep();
+	mutex_unlock(&pm_mutex);
 	printk(KERN_INFO "PM: Starting manual resume from disk\n");
 	noresume = 0;
 	software_resume();
@@ -1097,21 +1060,7 @@ static int __init noresume_setup(char *str)
 	return 1;
 }
 
-static int __init resumewait_setup(char *str)
-{
-	resume_wait = 1;
-	return 1;
-}
-
-static int __init resumedelay_setup(char *str)
-{
-	resume_delay = simple_strtoul(str, NULL, 0);
-	return 1;
-}
-
 __setup("noresume", noresume_setup);
 __setup("resume_offset=", resume_offset_setup);
 __setup("resume=", resume_setup);
 __setup("hibernate=", hibernate_setup);
-__setup("resumewait", resumewait_setup);
-__setup("resumedelay=", resumedelay_setup);
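
With the test modes restored, /sys/power/disk accepts 'platform', 'shutdown', 'reboot', 'test' and 'testproc', as the comment block above documents. Per the code, 'testproc' stops after the processes have been frozen (with a 5 second debug sleep), while 'test' also goes through device suspend and disable_nonboot_cpus() before backing out. A sketch of driving the interface, with an illustrative sysfs_write() helper:

/*
 * Sketch of exercising the /sys/power/disk modes documented above;
 * "testproc" is chosen because it only exercises the freezer path.
 * The sysfs_write() helper is illustrative, not part of the patch.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int sysfs_write(const char *path, const char *value)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, value, strlen(value));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	/* Select the hibernation mode; reading this file lists the choices. */
	if (sysfs_write("/sys/power/disk", "testproc"))
		return 1;
	/* Trigger hibernation through the state file. */
	return sysfs_write("/sys/power/state", "disk") ? 1 : 0;
}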
diff --git a/kernel/power/main.c b/kernel/power/main.c index 1c16f9167de..3304594553c 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c | |||
@@ -3,18 +3,15 @@ | |||
3 | * | 3 | * |
4 | * Copyright (c) 2003 Patrick Mochel | 4 | * Copyright (c) 2003 Patrick Mochel |
5 | * Copyright (c) 2003 Open Source Development Lab | 5 | * Copyright (c) 2003 Open Source Development Lab |
6 | * | 6 | * |
7 | * This file is released under the GPLv2 | 7 | * This file is released under the GPLv2 |
8 | * | 8 | * |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/export.h> | ||
12 | #include <linux/kobject.h> | 11 | #include <linux/kobject.h> |
13 | #include <linux/string.h> | 12 | #include <linux/string.h> |
14 | #include <linux/resume-trace.h> | 13 | #include <linux/resume-trace.h> |
15 | #include <linux/workqueue.h> | 14 | #include <linux/workqueue.h> |
16 | #include <linux/debugfs.h> | ||
17 | #include <linux/seq_file.h> | ||
18 | 15 | ||
19 | #include "power.h" | 16 | #include "power.h" |
20 | 17 | ||
@@ -59,7 +56,7 @@ static ssize_t pm_async_store(struct kobject *kobj, struct kobj_attribute *attr, | |||
59 | { | 56 | { |
60 | unsigned long val; | 57 | unsigned long val; |
61 | 58 | ||
62 | if (kstrtoul(buf, 10, &val)) | 59 | if (strict_strtoul(buf, 10, &val)) |
63 | return -EINVAL; | 60 | return -EINVAL; |
64 | 61 | ||
65 | if (val > 1) | 62 | if (val > 1) |
@@ -116,7 +113,7 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr, | |||
116 | p = memchr(buf, '\n', n); | 113 | p = memchr(buf, '\n', n); |
117 | len = p ? p - buf : n; | 114 | len = p ? p - buf : n; |
118 | 115 | ||
119 | lock_system_sleep(); | 116 | mutex_lock(&pm_mutex); |
120 | 117 | ||
121 | level = TEST_FIRST; | 118 | level = TEST_FIRST; |
122 | for (s = &pm_tests[level]; level <= TEST_MAX; s++, level++) | 119 | for (s = &pm_tests[level]; level <= TEST_MAX; s++, level++) |
@@ -126,7 +123,7 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr, | |||
126 | break; | 123 | break; |
127 | } | 124 | } |
128 | 125 | ||
129 | unlock_system_sleep(); | 126 | mutex_unlock(&pm_mutex); |
130 | 127 | ||
131 | return error ? error : n; | 128 | return error ? error : n; |
132 | } | 129 | } |
@@ -134,148 +131,8 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr, | |||
134 | power_attr(pm_test); | 131 | power_attr(pm_test); |
135 | #endif /* CONFIG_PM_DEBUG */ | 132 | #endif /* CONFIG_PM_DEBUG */ |
136 | 133 | ||
137 | #ifdef CONFIG_DEBUG_FS | ||
138 | static char *suspend_step_name(enum suspend_stat_step step) | ||
139 | { | ||
140 | switch (step) { | ||
141 | case SUSPEND_FREEZE: | ||
142 | return "freeze"; | ||
143 | case SUSPEND_PREPARE: | ||
144 | return "prepare"; | ||
145 | case SUSPEND_SUSPEND: | ||
146 | return "suspend"; | ||
147 | case SUSPEND_SUSPEND_NOIRQ: | ||
148 | return "suspend_noirq"; | ||
149 | case SUSPEND_RESUME_NOIRQ: | ||
150 | return "resume_noirq"; | ||
151 | case SUSPEND_RESUME: | ||
152 | return "resume"; | ||
153 | default: | ||
154 | return ""; | ||
155 | } | ||
156 | } | ||
157 | |||
158 | static int suspend_stats_show(struct seq_file *s, void *unused) | ||
159 | { | ||
160 | int i, index, last_dev, last_errno, last_step; | ||
161 | |||
162 | last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1; | ||
163 | last_dev %= REC_FAILED_NUM; | ||
164 | last_errno = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1; | ||
165 | last_errno %= REC_FAILED_NUM; | ||
166 | last_step = suspend_stats.last_failed_step + REC_FAILED_NUM - 1; | ||
167 | last_step %= REC_FAILED_NUM; | ||
168 | seq_printf(s, "%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n" | ||
169 | "%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n", | ||
170 | "success", suspend_stats.success, | ||
171 | "fail", suspend_stats.fail, | ||
172 | "failed_freeze", suspend_stats.failed_freeze, | ||
173 | "failed_prepare", suspend_stats.failed_prepare, | ||
174 | "failed_suspend", suspend_stats.failed_suspend, | ||
175 | "failed_suspend_late", | ||
176 | suspend_stats.failed_suspend_late, | ||
177 | "failed_suspend_noirq", | ||
178 | suspend_stats.failed_suspend_noirq, | ||
179 | "failed_resume", suspend_stats.failed_resume, | ||
180 | "failed_resume_early", | ||
181 | suspend_stats.failed_resume_early, | ||
182 | "failed_resume_noirq", | ||
183 | suspend_stats.failed_resume_noirq); | ||
184 | seq_printf(s, "failures:\n last_failed_dev:\t%-s\n", | ||
185 | suspend_stats.failed_devs[last_dev]); | ||
186 | for (i = 1; i < REC_FAILED_NUM; i++) { | ||
187 | index = last_dev + REC_FAILED_NUM - i; | ||
188 | index %= REC_FAILED_NUM; | ||
189 | seq_printf(s, "\t\t\t%-s\n", | ||
190 | suspend_stats.failed_devs[index]); | ||
191 | } | ||
192 | seq_printf(s, " last_failed_errno:\t%-d\n", | ||
193 | suspend_stats.errno[last_errno]); | ||
194 | for (i = 1; i < REC_FAILED_NUM; i++) { | ||
195 | index = last_errno + REC_FAILED_NUM - i; | ||
196 | index %= REC_FAILED_NUM; | ||
197 | seq_printf(s, "\t\t\t%-d\n", | ||
198 | suspend_stats.errno[index]); | ||
199 | } | ||
200 | seq_printf(s, " last_failed_step:\t%-s\n", | ||
201 | suspend_step_name( | ||
202 | suspend_stats.failed_steps[last_step])); | ||
203 | for (i = 1; i < REC_FAILED_NUM; i++) { | ||
204 | index = last_step + REC_FAILED_NUM - i; | ||
205 | index %= REC_FAILED_NUM; | ||
206 | seq_printf(s, "\t\t\t%-s\n", | ||
207 | suspend_step_name( | ||
208 | suspend_stats.failed_steps[index])); | ||
209 | } | ||
210 | |||
211 | return 0; | ||
212 | } | ||
213 | |||
214 | static int suspend_stats_open(struct inode *inode, struct file *file) | ||
215 | { | ||
216 | return single_open(file, suspend_stats_show, NULL); | ||
217 | } | ||
218 | |||
219 | static const struct file_operations suspend_stats_operations = { | ||
220 | .open = suspend_stats_open, | ||
221 | .read = seq_read, | ||
222 | .llseek = seq_lseek, | ||
223 | .release = single_release, | ||
224 | }; | ||
225 | |||
226 | static int __init pm_debugfs_init(void) | ||
227 | { | ||
228 | debugfs_create_file("suspend_stats", S_IFREG | S_IRUGO, | ||
229 | NULL, NULL, &suspend_stats_operations); | ||
230 | return 0; | ||
231 | } | ||
232 | |||
233 | late_initcall(pm_debugfs_init); | ||
234 | #endif /* CONFIG_DEBUG_FS */ | ||
235 | |||
236 | #endif /* CONFIG_PM_SLEEP */ | 134 | #endif /* CONFIG_PM_SLEEP */ |
237 | 135 | ||
238 | #ifdef CONFIG_PM_SLEEP_DEBUG | ||
239 | /* | ||
240 | * pm_print_times: print time taken by devices to suspend and resume. | ||
241 | * | ||
242 | * show() returns whether printing of suspend and resume times is enabled. | ||
243 | * store() accepts 0 or 1. 0 disables printing and 1 enables it. | ||
244 | */ | ||
245 | bool pm_print_times_enabled; | ||
246 | |||
247 | static ssize_t pm_print_times_show(struct kobject *kobj, | ||
248 | struct kobj_attribute *attr, char *buf) | ||
249 | { | ||
250 | return sprintf(buf, "%d\n", pm_print_times_enabled); | ||
251 | } | ||
252 | |||
253 | static ssize_t pm_print_times_store(struct kobject *kobj, | ||
254 | struct kobj_attribute *attr, | ||
255 | const char *buf, size_t n) | ||
256 | { | ||
257 | unsigned long val; | ||
258 | |||
259 | if (kstrtoul(buf, 10, &val)) | ||
260 | return -EINVAL; | ||
261 | |||
262 | if (val > 1) | ||
263 | return -EINVAL; | ||
264 | |||
265 | pm_print_times_enabled = !!val; | ||
266 | return n; | ||
267 | } | ||
268 | |||
269 | power_attr(pm_print_times); | ||
270 | |||
271 | static inline void pm_print_times_init(void) | ||
272 | { | ||
273 | pm_print_times_enabled = !!initcall_debug; | ||
274 | } | ||
275 | #else /* !CONFIG_PP_SLEEP_DEBUG */ | ||
276 | static inline void pm_print_times_init(void) {} | ||
277 | #endif /* CONFIG_PM_SLEEP_DEBUG */ | ||
278 | |||
279 | struct kobject *power_kobj; | 136 | struct kobject *power_kobj; |
280 | 137 | ||
281 | /** | 138 | /** |
@@ -285,7 +142,7 @@ struct kobject *power_kobj; | |||
285 | * 'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and | 142 | * 'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and |
286 | * 'disk' (Suspend-to-Disk). | 143 | * 'disk' (Suspend-to-Disk). |
287 | * | 144 | * |
288 | * store() accepts one of those strings, translates it into the | 145 | * store() accepts one of those strings, translates it into the |
289 | * proper enumerated value, and initiates a suspend transition. | 146 | * proper enumerated value, and initiates a suspend transition. |
290 | */ | 147 | */ |
291 | static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr, | 148 | static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr, |
@@ -310,56 +167,47 @@ static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr, | |||
310 | return (s - buf); | 167 | return (s - buf); |
311 | } | 168 | } |
312 | 169 | ||
313 | static suspend_state_t decode_state(const char *buf, size_t n) | 170 | static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr, |
171 | const char *buf, size_t n) | ||
314 | { | 172 | { |
315 | #ifdef CONFIG_SUSPEND | 173 | #ifdef CONFIG_SUSPEND |
174 | #ifdef CONFIG_EARLYSUSPEND | ||
175 | suspend_state_t state = PM_SUSPEND_ON; | ||
176 | #else | ||
316 | suspend_state_t state = PM_SUSPEND_STANDBY; | 177 | suspend_state_t state = PM_SUSPEND_STANDBY; |
178 | #endif | ||
317 | const char * const *s; | 179 | const char * const *s; |
318 | #endif | 180 | #endif |
319 | char *p; | 181 | char *p; |
320 | int len; | 182 | int len; |
183 | int error = -EINVAL; | ||
321 | 184 | ||
322 | p = memchr(buf, '\n', n); | 185 | p = memchr(buf, '\n', n); |
323 | len = p ? p - buf : n; | 186 | len = p ? p - buf : n; |
324 | 187 | ||
325 | /* Check hibernation first. */ | 188 | /* First, check if we are requested to hibernate */ |
326 | if (len == 4 && !strncmp(buf, "disk", len)) | 189 | if (len == 4 && !strncmp(buf, "disk", len)) { |
327 | return PM_SUSPEND_MAX; | 190 | error = hibernate(); |
191 | goto Exit; | ||
192 | } | ||
328 | 193 | ||
329 | #ifdef CONFIG_SUSPEND | 194 | #ifdef CONFIG_SUSPEND |
330 | for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++) | 195 | for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++) { |
331 | if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) | 196 | if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) |
332 | return state; | 197 | break; |
333 | #endif | ||
334 | |||
335 | return PM_SUSPEND_ON; | ||
336 | } | ||
337 | |||
338 | static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr, | ||
339 | const char *buf, size_t n) | ||
340 | { | ||
341 | suspend_state_t state; | ||
342 | int error; | ||
343 | |||
344 | error = pm_autosleep_lock(); | ||
345 | if (error) | ||
346 | return error; | ||
347 | |||
348 | if (pm_autosleep_state() > PM_SUSPEND_ON) { | ||
349 | error = -EBUSY; | ||
350 | goto out; | ||
351 | } | 198 | } |
199 | if (state < PM_SUSPEND_MAX && *s) | ||
200 | #ifdef CONFIG_EARLYSUSPEND | ||
201 | if (state == PM_SUSPEND_ON || valid_state(state)) { | ||
202 | error = 0; | ||
203 | request_suspend_state(state); | ||
204 | } | ||
205 | #else | ||
206 | error = enter_state(state); | ||
207 | #endif | ||
208 | #endif | ||
352 | 209 | ||
353 | state = decode_state(buf, n); | 210 | Exit: |
354 | if (state < PM_SUSPEND_MAX) | ||
355 | error = pm_suspend(state); | ||
356 | else if (state == PM_SUSPEND_MAX) | ||
357 | error = hibernate(); | ||
358 | else | ||
359 | error = -EINVAL; | ||
360 | |||
361 | out: | ||
362 | pm_autosleep_unlock(); | ||
363 | return error ? error : n; | 211 | return error ? error : n; |
364 | } | 212 | } |
365 | 213 | ||
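The rewritten state_store() above couples string parsing directly to the suspend entry path: "disk" triggers hibernate(), and any string matching pm_states[] is either handed to request_suspend_state() on early-suspend builds or entered synchronously via enter_state(). From user space the whole interface reduces to writing one of those strings to /sys/power/state. A minimal sketch of that, assuming write permission on the sysfs node:

    /* Sketch: request suspend-to-RAM via /sys/power/state.
     * Assumes a kernel built with CONFIG_SUSPEND; error handling kept minimal. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/sys/power/state", O_WRONLY);
            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* "mem", "standby" or "disk", matching pm_states[] above */
            if (write(fd, "mem", strlen("mem")) < 0)
                    perror("write");
            close(fd);
            return 0;
    }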
@@ -400,8 +248,7 @@ static ssize_t wakeup_count_show(struct kobject *kobj, | |||
400 | { | 248 | { |
401 | unsigned int val; | 249 | unsigned int val; |
402 | 250 | ||
403 | return pm_get_wakeup_count(&val, true) ? | 251 | return pm_get_wakeup_count(&val) ? sprintf(buf, "%u\n", val) : -EINTR; |
404 | sprintf(buf, "%u\n", val) : -EINTR; | ||
405 | } | 252 | } |
406 | 253 | ||
407 | static ssize_t wakeup_count_store(struct kobject *kobj, | 254 | static ssize_t wakeup_count_store(struct kobject *kobj, |
@@ -409,106 +256,15 @@ static ssize_t wakeup_count_store(struct kobject *kobj, | |||
409 | const char *buf, size_t n) | 256 | const char *buf, size_t n) |
410 | { | 257 | { |
411 | unsigned int val; | 258 | unsigned int val; |
412 | int error; | ||
413 | |||
414 | error = pm_autosleep_lock(); | ||
415 | if (error) | ||
416 | return error; | ||
417 | 259 | ||
418 | if (pm_autosleep_state() > PM_SUSPEND_ON) { | ||
419 | error = -EBUSY; | ||
420 | goto out; | ||
421 | } | ||
422 | |||
423 | error = -EINVAL; | ||
424 | if (sscanf(buf, "%u", &val) == 1) { | 260 | if (sscanf(buf, "%u", &val) == 1) { |
425 | if (pm_save_wakeup_count(val)) | 261 | if (pm_save_wakeup_count(val)) |
426 | error = n; | 262 | return n; |
427 | } | 263 | } |
428 | 264 | return -EINVAL; | |
429 | out: | ||
430 | pm_autosleep_unlock(); | ||
431 | return error; | ||
432 | } | 265 | } |
433 | 266 | ||
434 | power_attr(wakeup_count); | 267 | power_attr(wakeup_count); |
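wakeup_count_show()/wakeup_count_store() implement the race-free suspend handshake: user space reads the current wakeup event count, writes it back, and only then writes the sleep state; if a wakeup event fires in between, pm_save_wakeup_count() rejects the stale count and the store returns -EINVAL, telling the suspend daemon to retry. A sketch of that protocol under those assumptions (the sysfs paths are the real nodes; a production daemon would loop on failure):

    #include <fcntl.h>
    #include <unistd.h>

    static int suspend_once(void)
    {
            char buf[32];
            int n, fd;

            fd = open("/sys/power/wakeup_count", O_RDWR);
            if (fd < 0)
                    return -1;
            n = read(fd, buf, sizeof(buf) - 1);   /* may block/fail while events are in flight */
            if (n <= 0 || write(fd, buf, n) < 0) {
                    close(fd);                    /* a wakeup event raced us: caller retries */
                    return -1;
            }
            close(fd);

            fd = open("/sys/power/state", O_WRONLY);
            if (fd < 0)
                    return -1;
            n = write(fd, "mem", 3);
            close(fd);
            return n < 0 ? -1 : 0;
    }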
435 | |||
436 | #ifdef CONFIG_PM_AUTOSLEEP | ||
437 | static ssize_t autosleep_show(struct kobject *kobj, | ||
438 | struct kobj_attribute *attr, | ||
439 | char *buf) | ||
440 | { | ||
441 | suspend_state_t state = pm_autosleep_state(); | ||
442 | |||
443 | if (state == PM_SUSPEND_ON) | ||
444 | return sprintf(buf, "off\n"); | ||
445 | |||
446 | #ifdef CONFIG_SUSPEND | ||
447 | if (state < PM_SUSPEND_MAX) | ||
448 | return sprintf(buf, "%s\n", valid_state(state) ? | ||
449 | pm_states[state] : "error"); | ||
450 | #endif | ||
451 | #ifdef CONFIG_HIBERNATION | ||
452 | return sprintf(buf, "disk\n"); | ||
453 | #else | ||
454 | return sprintf(buf, "error"); | ||
455 | #endif | ||
456 | } | ||
457 | |||
458 | static ssize_t autosleep_store(struct kobject *kobj, | ||
459 | struct kobj_attribute *attr, | ||
460 | const char *buf, size_t n) | ||
461 | { | ||
462 | suspend_state_t state = decode_state(buf, n); | ||
463 | int error; | ||
464 | |||
465 | if (state == PM_SUSPEND_ON | ||
466 | && strcmp(buf, "off") && strcmp(buf, "off\n")) | ||
467 | return -EINVAL; | ||
468 | |||
469 | error = pm_autosleep_set_state(state); | ||
470 | return error ? error : n; | ||
471 | } | ||
472 | |||
473 | power_attr(autosleep); | ||
474 | #endif /* CONFIG_PM_AUTOSLEEP */ | ||
475 | |||
476 | #ifdef CONFIG_PM_WAKELOCKS | ||
477 | static ssize_t wake_lock_show(struct kobject *kobj, | ||
478 | struct kobj_attribute *attr, | ||
479 | char *buf) | ||
480 | { | ||
481 | return pm_show_wakelocks(buf, true); | ||
482 | } | ||
483 | |||
484 | static ssize_t wake_lock_store(struct kobject *kobj, | ||
485 | struct kobj_attribute *attr, | ||
486 | const char *buf, size_t n) | ||
487 | { | ||
488 | int error = pm_wake_lock(buf); | ||
489 | return error ? error : n; | ||
490 | } | ||
491 | |||
492 | power_attr(wake_lock); | ||
493 | |||
494 | static ssize_t wake_unlock_show(struct kobject *kobj, | ||
495 | struct kobj_attribute *attr, | ||
496 | char *buf) | ||
497 | { | ||
498 | return pm_show_wakelocks(buf, false); | ||
499 | } | ||
500 | |||
501 | static ssize_t wake_unlock_store(struct kobject *kobj, | ||
502 | struct kobj_attribute *attr, | ||
503 | const char *buf, size_t n) | ||
504 | { | ||
505 | int error = pm_wake_unlock(buf); | ||
506 | return error ? error : n; | ||
507 | } | ||
508 | |||
509 | power_attr(wake_unlock); | ||
510 | |||
511 | #endif /* CONFIG_PM_WAKELOCKS */ | ||
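Both the mainline CONFIG_PM_WAKELOCKS code removed here and the Android CONFIG_USER_WAKELOCK code restored later in this file expose the same pair of nodes: writing "name" or "name timeout" to /sys/power/wake_lock takes (and, if needed, creates) a lock, and writing "name" to /sys/power/wake_unlock drops it. A small sketch, assuming the nanosecond timeout convention used by the Android interface:

    /* Sketch: hold a named user-space wakelock for one second. */
    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            int lock = open("/sys/power/wake_lock", O_WRONLY);
            int unlock = open("/sys/power/wake_unlock", O_WRONLY);
            if (lock < 0 || unlock < 0)
                    return 1;

            /* "mylock 1000000000" = take "mylock" with a 1 s timeout */
            write(lock, "mylock 1000000000", 17);

            /* ... work that must not be interrupted by suspend ... */

            write(unlock, "mylock", 6);           /* or let the timeout expire */
            close(lock);
            close(unlock);
            return 0;
    }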
512 | #endif /* CONFIG_PM_SLEEP */ | 268 | #endif /* CONFIG_PM_SLEEP */ |
513 | 269 | ||
514 | #ifdef CONFIG_PM_TRACE | 270 | #ifdef CONFIG_PM_TRACE |
@@ -553,6 +309,11 @@ power_attr(pm_trace_dev_match); | |||
553 | 309 | ||
554 | #endif /* CONFIG_PM_TRACE */ | 310 | #endif /* CONFIG_PM_TRACE */ |
555 | 311 | ||
312 | #ifdef CONFIG_USER_WAKELOCK | ||
313 | power_attr(wake_lock); | ||
314 | power_attr(wake_unlock); | ||
315 | #endif | ||
316 | |||
556 | static struct attribute * g[] = { | 317 | static struct attribute * g[] = { |
557 | &state_attr.attr, | 318 | &state_attr.attr, |
558 | #ifdef CONFIG_PM_TRACE | 319 | #ifdef CONFIG_PM_TRACE |
@@ -562,18 +323,12 @@ static struct attribute * g[] = { | |||
562 | #ifdef CONFIG_PM_SLEEP | 323 | #ifdef CONFIG_PM_SLEEP |
563 | &pm_async_attr.attr, | 324 | &pm_async_attr.attr, |
564 | &wakeup_count_attr.attr, | 325 | &wakeup_count_attr.attr, |
565 | #ifdef CONFIG_PM_AUTOSLEEP | ||
566 | &autosleep_attr.attr, | ||
567 | #endif | ||
568 | #ifdef CONFIG_PM_WAKELOCKS | ||
569 | &wake_lock_attr.attr, | ||
570 | &wake_unlock_attr.attr, | ||
571 | #endif | ||
572 | #ifdef CONFIG_PM_DEBUG | 326 | #ifdef CONFIG_PM_DEBUG |
573 | &pm_test_attr.attr, | 327 | &pm_test_attr.attr, |
574 | #endif | 328 | #endif |
575 | #ifdef CONFIG_PM_SLEEP_DEBUG | 329 | #ifdef CONFIG_USER_WAKELOCK |
576 | &pm_print_times_attr.attr, | 330 | &wake_lock_attr.attr, |
331 | &wake_unlock_attr.attr, | ||
577 | #endif | 332 | #endif |
578 | #endif | 333 | #endif |
579 | NULL, | 334 | NULL, |
@@ -607,11 +362,7 @@ static int __init pm_init(void) | |||
607 | power_kobj = kobject_create_and_add("power", NULL); | 362 | power_kobj = kobject_create_and_add("power", NULL); |
608 | if (!power_kobj) | 363 | if (!power_kobj) |
609 | return -ENOMEM; | 364 | return -ENOMEM; |
610 | error = sysfs_create_group(power_kobj, &attr_group); | 365 | return sysfs_create_group(power_kobj, &attr_group); |
611 | if (error) | ||
612 | return error; | ||
613 | pm_print_times_init(); | ||
614 | return pm_autosleep_init(); | ||
615 | } | 366 | } |
616 | 367 | ||
617 | core_initcall(pm_init); | 368 | core_initcall(pm_init); |
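Every sysfs node wired into g[] above comes from the power_attr() helper, which pairs a _name##_show/_name##_store function with a kobj_attribute of the same name. For reference, a sketch of the macro as it appears in kernel/power/power.h of this era (exact mode bits may differ between trees):

    /* power_attr(foo) expects foo_show()/foo_store() to exist and
     * produces foo_attr, which can then be listed in the attribute array. */
    #define power_attr(_name) \
    static struct kobj_attribute _name##_attr = {   \
            .attr   = {                             \
                    .name = __stringify(_name),     \
                    .mode = 0644,                   \
            },                                      \
            .show   = _name##_show,                 \
            .store  = _name##_store,                \
    }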
diff --git a/kernel/power/power.h b/kernel/power/power.h index 7d4b7ffb3c1..b6b9006480f 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h | |||
@@ -50,8 +50,6 @@ static inline char *check_image_kernel(struct swsusp_info *info) | |||
50 | #define SPARE_PAGES ((1024 * 1024) >> PAGE_SHIFT) | 50 | #define SPARE_PAGES ((1024 * 1024) >> PAGE_SHIFT) |
51 | 51 | ||
52 | /* kernel/power/hibernate.c */ | 52 | /* kernel/power/hibernate.c */ |
53 | extern bool freezer_test_done; | ||
54 | |||
55 | extern int hibernation_snapshot(int platform_mode); | 53 | extern int hibernation_snapshot(int platform_mode); |
56 | extern int hibernation_restore(int platform_mode); | 54 | extern int hibernation_restore(int platform_mode); |
57 | extern int hibernation_platform_enter(void); | 55 | extern int hibernation_platform_enter(void); |
@@ -148,7 +146,6 @@ extern int swsusp_swap_in_use(void); | |||
148 | */ | 146 | */ |
149 | #define SF_PLATFORM_MODE 1 | 147 | #define SF_PLATFORM_MODE 1 |
150 | #define SF_NOCOMPRESS_MODE 2 | 148 | #define SF_NOCOMPRESS_MODE 2 |
151 | #define SF_CRC32_MODE 4 | ||
152 | 149 | ||
153 | /* kernel/power/hibernate.c */ | 150 | /* kernel/power/hibernate.c */ |
154 | extern int swsusp_check(void); | 151 | extern int swsusp_check(void); |
@@ -156,9 +153,6 @@ extern void swsusp_free(void); | |||
156 | extern int swsusp_read(unsigned int *flags_p); | 153 | extern int swsusp_read(unsigned int *flags_p); |
157 | extern int swsusp_write(unsigned int flags); | 154 | extern int swsusp_write(unsigned int flags); |
158 | extern void swsusp_close(fmode_t); | 155 | extern void swsusp_close(fmode_t); |
159 | #ifdef CONFIG_SUSPEND | ||
160 | extern int swsusp_unmark(void); | ||
161 | #endif | ||
162 | 156 | ||
163 | /* kernel/power/block_io.c */ | 157 | /* kernel/power/block_io.c */ |
164 | extern struct block_device *hib_resume_bdev; | 158 | extern struct block_device *hib_resume_bdev; |
@@ -180,11 +174,13 @@ extern const char *const pm_states[]; | |||
180 | 174 | ||
181 | extern bool valid_state(suspend_state_t state); | 175 | extern bool valid_state(suspend_state_t state); |
182 | extern int suspend_devices_and_enter(suspend_state_t state); | 176 | extern int suspend_devices_and_enter(suspend_state_t state); |
177 | extern int enter_state(suspend_state_t state); | ||
183 | #else /* !CONFIG_SUSPEND */ | 178 | #else /* !CONFIG_SUSPEND */ |
184 | static inline int suspend_devices_and_enter(suspend_state_t state) | 179 | static inline int suspend_devices_and_enter(suspend_state_t state) |
185 | { | 180 | { |
186 | return -ENOSYS; | 181 | return -ENOSYS; |
187 | } | 182 | } |
183 | static inline int enter_state(suspend_state_t state) { return -ENOSYS; } | ||
188 | static inline bool valid_state(suspend_state_t state) { return false; } | 184 | static inline bool valid_state(suspend_state_t state) { return false; } |
189 | #endif /* !CONFIG_SUSPEND */ | 185 | #endif /* !CONFIG_SUSPEND */ |
190 | 186 | ||
@@ -232,25 +228,7 @@ extern int pm_test_level; | |||
232 | #ifdef CONFIG_SUSPEND_FREEZER | 228 | #ifdef CONFIG_SUSPEND_FREEZER |
233 | static inline int suspend_freeze_processes(void) | 229 | static inline int suspend_freeze_processes(void) |
234 | { | 230 | { |
235 | int error; | 231 | return freeze_processes(); |
236 | |||
237 | error = freeze_processes(); | ||
238 | /* | ||
239 | * freeze_processes() automatically thaws every task if freezing | ||
240 | * fails. So we need not do anything extra upon error. | ||
241 | */ | ||
242 | if (error) | ||
243 | return error; | ||
244 | |||
245 | error = freeze_kernel_threads(); | ||
246 | /* | ||
247 | * freeze_kernel_threads() thaws only kernel threads upon freezing | ||
248 | * failure. So we have to thaw the userspace tasks ourselves. | ||
249 | */ | ||
250 | if (error) | ||
251 | thaw_processes(); | ||
252 | |||
253 | return error; | ||
254 | } | 232 | } |
255 | 233 | ||
256 | static inline void suspend_thaw_processes(void) | 234 | static inline void suspend_thaw_processes(void) |
@@ -268,29 +246,26 @@ static inline void suspend_thaw_processes(void) | |||
268 | } | 246 | } |
269 | #endif | 247 | #endif |
270 | 248 | ||
271 | #ifdef CONFIG_PM_AUTOSLEEP | 249 | #ifdef CONFIG_WAKELOCK |
272 | |||
273 | /* kernel/power/autosleep.c */ | ||
274 | extern int pm_autosleep_init(void); | ||
275 | extern int pm_autosleep_lock(void); | ||
276 | extern void pm_autosleep_unlock(void); | ||
277 | extern suspend_state_t pm_autosleep_state(void); | ||
278 | extern int pm_autosleep_set_state(suspend_state_t state); | ||
279 | |||
280 | #else /* !CONFIG_PM_AUTOSLEEP */ | ||
281 | |||
282 | static inline int pm_autosleep_init(void) { return 0; } | ||
283 | static inline int pm_autosleep_lock(void) { return 0; } | ||
284 | static inline void pm_autosleep_unlock(void) {} | ||
285 | static inline suspend_state_t pm_autosleep_state(void) { return PM_SUSPEND_ON; } | ||
286 | |||
287 | #endif /* !CONFIG_PM_AUTOSLEEP */ | ||
288 | |||
289 | #ifdef CONFIG_PM_WAKELOCKS | ||
290 | |||
291 | /* kernel/power/wakelock.c */ | 250 | /* kernel/power/wakelock.c */ |
292 | extern ssize_t pm_show_wakelocks(char *buf, bool show_active); | 251 | extern struct workqueue_struct *suspend_work_queue; |
293 | extern int pm_wake_lock(const char *buf); | 252 | extern struct wake_lock main_wake_lock; |
294 | extern int pm_wake_unlock(const char *buf); | 253 | extern suspend_state_t requested_suspend_state; |
254 | #endif | ||
295 | 255 | ||
296 | #endif /* !CONFIG_PM_WAKELOCKS */ | 256 | #ifdef CONFIG_USER_WAKELOCK |
257 | ssize_t wake_lock_show(struct kobject *kobj, struct kobj_attribute *attr, | ||
258 | char *buf); | ||
259 | ssize_t wake_lock_store(struct kobject *kobj, struct kobj_attribute *attr, | ||
260 | const char *buf, size_t n); | ||
261 | ssize_t wake_unlock_show(struct kobject *kobj, struct kobj_attribute *attr, | ||
262 | char *buf); | ||
263 | ssize_t wake_unlock_store(struct kobject *kobj, struct kobj_attribute *attr, | ||
264 | const char *buf, size_t n); | ||
265 | #endif | ||
266 | |||
267 | #ifdef CONFIG_EARLYSUSPEND | ||
268 | /* kernel/power/earlysuspend.c */ | ||
269 | void request_suspend_state(suspend_state_t state); | ||
270 | suspend_state_t get_suspend_state(void); | ||
271 | #endif | ||
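The power.h hunks above follow the kernel's usual pattern for optional features: when the config symbol is set, the header declares the real functions; when it is not, it supplies static inline stubs so that callers need no #ifdefs of their own. Condensed illustration of the shape, using a name from the hunk above:

    #ifdef CONFIG_SUSPEND
    extern int enter_state(suspend_state_t state);
    #else /* !CONFIG_SUSPEND */
    /* Stub keeps callers ifdef-free; -ENOSYS signals "not supported". */
    static inline int enter_state(suspend_state_t state) { return -ENOSYS; }
    #endif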
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c index 68197a4e8fc..d52359374e8 100644 --- a/kernel/power/poweroff.c +++ b/kernel/power/poweroff.c | |||
@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = { | |||
37 | .enable_mask = SYSRQ_ENABLE_BOOT, | 37 | .enable_mask = SYSRQ_ENABLE_BOOT, |
38 | }; | 38 | }; |
39 | 39 | ||
40 | static int __init pm_sysrq_init(void) | 40 | static int pm_sysrq_init(void) |
41 | { | 41 | { |
42 | register_sysrq_key('o', &sysrq_poweroff_op); | 42 | register_sysrq_key('o', &sysrq_poweroff_op); |
43 | return 0; | 43 | return 0; |
diff --git a/kernel/power/process.c b/kernel/power/process.c index d5a258b60c6..31338cdeafc 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c | |||
@@ -16,14 +16,23 @@ | |||
16 | #include <linux/freezer.h> | 16 | #include <linux/freezer.h> |
17 | #include <linux/delay.h> | 17 | #include <linux/delay.h> |
18 | #include <linux/workqueue.h> | 18 | #include <linux/workqueue.h> |
19 | #include <linux/kmod.h> | 19 | #include <linux/wakelock.h> |
20 | 20 | ||
21 | /* | 21 | /* |
22 | * Timeout for stopping processes | 22 | * Timeout for stopping processes |
23 | */ | 23 | */ |
24 | #define TIMEOUT (20 * HZ) | 24 | #define TIMEOUT (20 * HZ) |
25 | 25 | ||
26 | static int try_to_freeze_tasks(bool user_only) | 26 | static inline int freezable(struct task_struct * p) |
27 | { | ||
28 | if ((p == current) || | ||
29 | (p->flags & PF_NOFREEZE) || | ||
30 | (p->exit_state != 0)) | ||
31 | return 0; | ||
32 | return 1; | ||
33 | } | ||
34 | |||
35 | static int try_to_freeze_tasks(bool sig_only) | ||
27 | { | 36 | { |
28 | struct task_struct *g, *p; | 37 | struct task_struct *g, *p; |
29 | unsigned long end_time; | 38 | unsigned long end_time; |
@@ -38,26 +47,46 @@ static int try_to_freeze_tasks(bool user_only) | |||
38 | 47 | ||
39 | end_time = jiffies + TIMEOUT; | 48 | end_time = jiffies + TIMEOUT; |
40 | 49 | ||
41 | if (!user_only) | 50 | if (!sig_only) |
42 | freeze_workqueues_begin(); | 51 | freeze_workqueues_begin(); |
43 | 52 | ||
44 | while (true) { | 53 | while (true) { |
45 | todo = 0; | 54 | todo = 0; |
46 | read_lock(&tasklist_lock); | 55 | read_lock(&tasklist_lock); |
47 | do_each_thread(g, p) { | 56 | do_each_thread(g, p) { |
48 | if (p == current || !freeze_task(p)) | 57 | if (frozen(p) || !freezable(p)) |
58 | continue; | ||
59 | |||
60 | if (!freeze_task(p, sig_only)) | ||
49 | continue; | 61 | continue; |
50 | 62 | ||
51 | if (!freezer_should_skip(p)) | 63 | /* |
64 | * Now that we've done set_freeze_flag, don't | ||
65 | * perturb a task in TASK_STOPPED or TASK_TRACED. | ||
66 | * It is "frozen enough". If the task does wake | ||
67 | * up, it will immediately call try_to_freeze. | ||
68 | * | ||
69 | * Because freeze_task() goes through p's | ||
70 | * scheduler lock after setting TIF_FREEZE, it's | ||
71 | * guaranteed that either we see TASK_RUNNING or | ||
72 | * try_to_stop() after schedule() in ptrace/signal | ||
73 | * stop sees TIF_FREEZE. | ||
74 | */ | ||
75 | if (!task_is_stopped_or_traced(p) && | ||
76 | !freezer_should_skip(p)) | ||
52 | todo++; | 77 | todo++; |
53 | } while_each_thread(g, p); | 78 | } while_each_thread(g, p); |
54 | read_unlock(&tasklist_lock); | 79 | read_unlock(&tasklist_lock); |
55 | 80 | ||
56 | if (!user_only) { | 81 | if (!sig_only) { |
57 | wq_busy = freeze_workqueues_busy(); | 82 | wq_busy = freeze_workqueues_busy(); |
58 | todo += wq_busy; | 83 | todo += wq_busy; |
59 | } | 84 | } |
60 | 85 | ||
86 | if (todo && has_wake_lock(WAKE_LOCK_SUSPEND)) { | ||
87 | wakeup = 1; | ||
88 | break; | ||
89 | } | ||
61 | if (!todo || time_after(jiffies, end_time)) | 90 | if (!todo || time_after(jiffies, end_time)) |
62 | break; | 91 | break; |
63 | 92 | ||
@@ -68,7 +97,7 @@ static int try_to_freeze_tasks(bool user_only) | |||
68 | 97 | ||
69 | /* | 98 | /* |
70 | * We need to retry, but first give the freezing tasks some | 99 | * We need to retry, but first give the freezing tasks some |
71 | * time to enter the refrigerator. | 100 | * time to enter the refrigerator. |
72 | */ | 101 | */ |
73 | msleep(10); | 102 | msleep(10); |
74 | } | 103 | } |
@@ -79,22 +108,35 @@ static int try_to_freeze_tasks(bool user_only) | |||
79 | elapsed_csecs = elapsed_csecs64; | 108 | elapsed_csecs = elapsed_csecs64; |
80 | 109 | ||
81 | if (todo) { | 110 | if (todo) { |
82 | printk("\n"); | 111 | /* This does not unfreeze processes that are already frozen |
83 | printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds " | 112 | * (we have slightly ugly calling convention in that respect, |
84 | "(%d tasks refusing to freeze, wq_busy=%d):\n", | 113 | * and caller must call thaw_processes() if something fails), |
85 | wakeup ? "aborted" : "failed", | 114 | * but it cleans up leftover PF_FREEZE requests. |
86 | elapsed_csecs / 100, elapsed_csecs % 100, | 115 | */ |
87 | todo - wq_busy, wq_busy); | 116 | if (wakeup) { |
88 | 117 | printk("\n"); | |
89 | if (!wakeup) { | 118 | printk(KERN_ERR "Freezing of %s aborted\n", |
90 | read_lock(&tasklist_lock); | 119 | sig_only ? "user space " : "tasks "); |
91 | do_each_thread(g, p) { | ||
92 | if (p != current && !freezer_should_skip(p) | ||
93 | && freezing(p) && !frozen(p)) | ||
94 | sched_show_task(p); | ||
95 | } while_each_thread(g, p); | ||
96 | read_unlock(&tasklist_lock); | ||
97 | } | 120 | } |
121 | else { | ||
122 | printk("\n"); | ||
123 | printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds " | ||
124 | "(%d tasks refusing to freeze, wq_busy=%d):\n", | ||
125 | elapsed_csecs / 100, elapsed_csecs % 100, | ||
126 | todo - wq_busy, wq_busy); | ||
127 | } | ||
128 | thaw_workqueues(); | ||
129 | |||
130 | read_lock(&tasklist_lock); | ||
131 | do_each_thread(g, p) { | ||
132 | task_lock(p); | ||
133 | if (freezing(p) && !freezer_should_skip(p) && | ||
134 | elapsed_csecs > 100) | ||
135 | sched_show_task(p); | ||
136 | cancel_freezing(p); | ||
137 | task_unlock(p); | ||
138 | } while_each_thread(g, p); | ||
139 | read_unlock(&tasklist_lock); | ||
98 | } else { | 140 | } else { |
99 | printk("(elapsed %d.%02d seconds) ", elapsed_csecs / 100, | 141 | printk("(elapsed %d.%02d seconds) ", elapsed_csecs / 100, |
100 | elapsed_csecs % 100); | 142 | elapsed_csecs % 100); |
@@ -104,106 +146,61 @@ static int try_to_freeze_tasks(bool user_only) | |||
104 | } | 146 | } |
105 | 147 | ||
106 | /** | 148 | /** |
107 | * freeze_processes - Signal user space processes to enter the refrigerator. | 149 | * freeze_processes - tell processes to enter the refrigerator |
108 | * | ||
109 | * On success, returns 0. On failure, -errno and system is fully thawed. | ||
110 | */ | 150 | */ |
111 | int freeze_processes(void) | 151 | int freeze_processes(void) |
112 | { | 152 | { |
113 | int error; | 153 | int error; |
114 | 154 | ||
115 | error = __usermodehelper_disable(UMH_FREEZING); | ||
116 | if (error) | ||
117 | return error; | ||
118 | |||
119 | if (!pm_freezing) | ||
120 | atomic_inc(&system_freezing_cnt); | ||
121 | |||
122 | printk("Freezing user space processes ... "); | 155 | printk("Freezing user space processes ... "); |
123 | pm_freezing = true; | ||
124 | error = try_to_freeze_tasks(true); | 156 | error = try_to_freeze_tasks(true); |
125 | if (!error) { | ||
126 | printk("done."); | ||
127 | __usermodehelper_set_disable_depth(UMH_DISABLED); | ||
128 | oom_killer_disable(); | ||
129 | } | ||
130 | printk("\n"); | ||
131 | BUG_ON(in_atomic()); | ||
132 | |||
133 | if (error) | 157 | if (error) |
134 | thaw_processes(); | 158 | goto Exit; |
135 | return error; | 159 | printk("done.\n"); |
136 | } | ||
137 | |||
138 | /** | ||
139 | * freeze_kernel_threads - Make freezable kernel threads go to the refrigerator. | ||
140 | * | ||
141 | * On success, returns 0. On failure, -errno and only the kernel threads are | ||
142 | * thawed, so as to give a chance to the caller to do additional cleanups | ||
143 | * (if any) before thawing the userspace tasks. So, it is the responsibility | ||
144 | * of the caller to thaw the userspace tasks, when the time is right. | ||
145 | */ | ||
146 | int freeze_kernel_threads(void) | ||
147 | { | ||
148 | int error; | ||
149 | 160 | ||
150 | printk("Freezing remaining freezable tasks ... "); | 161 | printk("Freezing remaining freezable tasks ... "); |
151 | pm_nosig_freezing = true; | ||
152 | error = try_to_freeze_tasks(false); | 162 | error = try_to_freeze_tasks(false); |
153 | if (!error) | 163 | if (error) |
154 | printk("done."); | 164 | goto Exit; |
165 | printk("done."); | ||
155 | 166 | ||
156 | printk("\n"); | 167 | oom_killer_disable(); |
168 | Exit: | ||
157 | BUG_ON(in_atomic()); | 169 | BUG_ON(in_atomic()); |
170 | printk("\n"); | ||
158 | 171 | ||
159 | if (error) | ||
160 | thaw_kernel_threads(); | ||
161 | return error; | 172 | return error; |
162 | } | 173 | } |
163 | 174 | ||
164 | void thaw_processes(void) | 175 | static void thaw_tasks(bool nosig_only) |
165 | { | 176 | { |
166 | struct task_struct *g, *p; | 177 | struct task_struct *g, *p; |
167 | 178 | ||
168 | if (pm_freezing) | 179 | read_lock(&tasklist_lock); |
169 | atomic_dec(&system_freezing_cnt); | 180 | do_each_thread(g, p) { |
170 | pm_freezing = false; | 181 | if (!freezable(p)) |
171 | pm_nosig_freezing = false; | 182 | continue; |
172 | |||
173 | oom_killer_enable(); | ||
174 | 183 | ||
175 | printk("Restarting tasks ... "); | 184 | if (nosig_only && should_send_signal(p)) |
185 | continue; | ||
176 | 186 | ||
177 | thaw_workqueues(); | 187 | if (cgroup_freezing_or_frozen(p)) |
188 | continue; | ||
178 | 189 | ||
179 | read_lock(&tasklist_lock); | 190 | thaw_process(p); |
180 | do_each_thread(g, p) { | ||
181 | __thaw_task(p); | ||
182 | } while_each_thread(g, p); | 191 | } while_each_thread(g, p); |
183 | read_unlock(&tasklist_lock); | 192 | read_unlock(&tasklist_lock); |
184 | |||
185 | usermodehelper_enable(); | ||
186 | |||
187 | schedule(); | ||
188 | printk("done.\n"); | ||
189 | } | 193 | } |
190 | 194 | ||
191 | void thaw_kernel_threads(void) | 195 | void thaw_processes(void) |
192 | { | 196 | { |
193 | struct task_struct *g, *p; | 197 | oom_killer_enable(); |
194 | |||
195 | pm_nosig_freezing = false; | ||
196 | printk("Restarting kernel threads ... "); | ||
197 | 198 | ||
199 | printk("Restarting tasks ... "); | ||
198 | thaw_workqueues(); | 200 | thaw_workqueues(); |
199 | 201 | thaw_tasks(true); | |
200 | read_lock(&tasklist_lock); | 202 | thaw_tasks(false); |
201 | do_each_thread(g, p) { | ||
202 | if (p->flags & (PF_KTHREAD | PF_WQ_WORKER)) | ||
203 | __thaw_task(p); | ||
204 | } while_each_thread(g, p); | ||
205 | read_unlock(&tasklist_lock); | ||
206 | |||
207 | schedule(); | 203 | schedule(); |
208 | printk("done.\n"); | 204 | printk("done.\n"); |
209 | } | 205 | } |
206 | |||
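The freezer machinery in process.c only works for tasks that opt in: a kernel thread must mark itself freezable and poll try_to_freeze() at a safe point, which is where it parks when try_to_freeze_tasks() sets the freeze flag on it. A minimal sketch of a cooperating kthread (the work body is a placeholder):

    #include <linux/freezer.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>

    static int example_thread_fn(void *unused)
    {
            set_freezable();        /* clear PF_NOFREEZE for this thread */
            while (!kthread_should_stop()) {
                    /* Park here while the system suspends/hibernates. */
                    try_to_freeze();

                    /* ... hypothetical unit of work ... */
                    schedule_timeout_interruptible(HZ);
            }
            return 0;
    }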
diff --git a/kernel/power/qos.c b/kernel/power/qos.c deleted file mode 100644 index 9322ff7eaad..00000000000 --- a/kernel/power/qos.c +++ /dev/null | |||
@@ -1,602 +0,0 @@ | |||
1 | /* | ||
2 | * This module exposes the interface to kernel space for specifying | ||
3 | * QoS dependencies. It provides infrastructure for registration of: | ||
4 | * | ||
5 | * Dependents on a QoS value : register requests | ||
6 | * Watchers of QoS value : get notified when target QoS value changes | ||
7 | * | ||
8 | * This QoS design is best effort based. Dependents register their QoS needs. | ||
9 | * Watchers register to keep track of the current QoS needs of the system. | ||
10 | * | ||
11 | * There are 3 basic classes of QoS parameter: latency, timeout, throughput | ||
12 | * each have defined units: | ||
13 | * latency: usec | ||
14 | * timeout: usec <-- currently not used. | ||
15 | * throughput: kbs (kilo byte / sec) | ||
16 | * | ||
17 | * There are lists of pm_qos_objects each one wrapping requests, notifiers | ||
18 | * | ||
19 | * User mode requests on a QOS parameter register themselves to the | ||
20 | * subsystem by opening the device node /dev/... and writing there request to | ||
21 | * the node. As long as the process holds a file handle open to the node the | ||
22 | * client continues to be accounted for. Upon file release the usermode | ||
23 | * request is removed and a new qos target is computed. This way when the | ||
24 | * request that the application has is cleaned up when closes the file | ||
25 | * pointer or exits the pm_qos_object will get an opportunity to clean up. | ||
26 | * | ||
27 | * Mark Gross <mgross@linux.intel.com> | ||
28 | */ | ||
29 | |||
30 | /*#define DEBUG*/ | ||
31 | |||
32 | #include <linux/pm_qos.h> | ||
33 | #include <linux/sched.h> | ||
34 | #include <linux/spinlock.h> | ||
35 | #include <linux/slab.h> | ||
36 | #include <linux/time.h> | ||
37 | #include <linux/fs.h> | ||
38 | #include <linux/device.h> | ||
39 | #include <linux/miscdevice.h> | ||
40 | #include <linux/string.h> | ||
41 | #include <linux/platform_device.h> | ||
42 | #include <linux/init.h> | ||
43 | #include <linux/kernel.h> | ||
44 | |||
45 | #include <linux/uaccess.h> | ||
46 | #include <linux/export.h> | ||
47 | |||
48 | /* | ||
49 | * locking rule: all changes to constraints or notifiers lists | ||
50 | * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock | ||
51 | * held, taken with _irqsave. One lock to rule them all | ||
52 | */ | ||
53 | struct pm_qos_object { | ||
54 | struct pm_qos_constraints *constraints; | ||
55 | struct miscdevice pm_qos_power_miscdev; | ||
56 | char *name; | ||
57 | }; | ||
58 | |||
59 | static DEFINE_SPINLOCK(pm_qos_lock); | ||
60 | |||
61 | static struct pm_qos_object null_pm_qos; | ||
62 | |||
63 | static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier); | ||
64 | static struct pm_qos_constraints cpu_dma_constraints = { | ||
65 | .list = PLIST_HEAD_INIT(cpu_dma_constraints.list), | ||
66 | .target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE, | ||
67 | .default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE, | ||
68 | .type = PM_QOS_MIN, | ||
69 | .notifiers = &cpu_dma_lat_notifier, | ||
70 | }; | ||
71 | static struct pm_qos_object cpu_dma_pm_qos = { | ||
72 | .constraints = &cpu_dma_constraints, | ||
73 | .name = "cpu_dma_latency", | ||
74 | }; | ||
75 | |||
76 | static BLOCKING_NOTIFIER_HEAD(network_lat_notifier); | ||
77 | static struct pm_qos_constraints network_lat_constraints = { | ||
78 | .list = PLIST_HEAD_INIT(network_lat_constraints.list), | ||
79 | .target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE, | ||
80 | .default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE, | ||
81 | .type = PM_QOS_MIN, | ||
82 | .notifiers = &network_lat_notifier, | ||
83 | }; | ||
84 | static struct pm_qos_object network_lat_pm_qos = { | ||
85 | .constraints = &network_lat_constraints, | ||
86 | .name = "network_latency", | ||
87 | }; | ||
88 | |||
89 | |||
90 | static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier); | ||
91 | static struct pm_qos_constraints network_tput_constraints = { | ||
92 | .list = PLIST_HEAD_INIT(network_tput_constraints.list), | ||
93 | .target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE, | ||
94 | .default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE, | ||
95 | .type = PM_QOS_MAX, | ||
96 | .notifiers = &network_throughput_notifier, | ||
97 | }; | ||
98 | static struct pm_qos_object network_throughput_pm_qos = { | ||
99 | .constraints = &network_tput_constraints, | ||
100 | .name = "network_throughput", | ||
101 | }; | ||
102 | |||
103 | |||
104 | static struct pm_qos_object *pm_qos_array[] = { | ||
105 | &null_pm_qos, | ||
106 | &cpu_dma_pm_qos, | ||
107 | &network_lat_pm_qos, | ||
108 | &network_throughput_pm_qos | ||
109 | }; | ||
110 | |||
111 | static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf, | ||
112 | size_t count, loff_t *f_pos); | ||
113 | static ssize_t pm_qos_power_read(struct file *filp, char __user *buf, | ||
114 | size_t count, loff_t *f_pos); | ||
115 | static int pm_qos_power_open(struct inode *inode, struct file *filp); | ||
116 | static int pm_qos_power_release(struct inode *inode, struct file *filp); | ||
117 | |||
118 | static const struct file_operations pm_qos_power_fops = { | ||
119 | .write = pm_qos_power_write, | ||
120 | .read = pm_qos_power_read, | ||
121 | .open = pm_qos_power_open, | ||
122 | .release = pm_qos_power_release, | ||
123 | .llseek = noop_llseek, | ||
124 | }; | ||
125 | |||
126 | /* unlocked internal variant */ | ||
127 | static inline int pm_qos_get_value(struct pm_qos_constraints *c) | ||
128 | { | ||
129 | if (plist_head_empty(&c->list)) | ||
130 | return c->default_value; | ||
131 | |||
132 | switch (c->type) { | ||
133 | case PM_QOS_MIN: | ||
134 | return plist_first(&c->list)->prio; | ||
135 | |||
136 | case PM_QOS_MAX: | ||
137 | return plist_last(&c->list)->prio; | ||
138 | |||
139 | default: | ||
140 | /* runtime check for not using enum */ | ||
141 | BUG(); | ||
142 | return PM_QOS_DEFAULT_VALUE; | ||
143 | } | ||
144 | } | ||
145 | |||
146 | s32 pm_qos_read_value(struct pm_qos_constraints *c) | ||
147 | { | ||
148 | return c->target_value; | ||
149 | } | ||
150 | |||
151 | static inline void pm_qos_set_value(struct pm_qos_constraints *c, s32 value) | ||
152 | { | ||
153 | c->target_value = value; | ||
154 | } | ||
155 | |||
156 | /** | ||
157 | * pm_qos_update_target - manages the constraints list and calls the notifiers | ||
158 | * if needed | ||
159 | * @c: constraints data struct | ||
160 | * @node: request to add to the list, to update or to remove | ||
161 | * @action: action to take on the constraints list | ||
162 | * @value: value of the request to add or update | ||
163 | * | ||
164 | * This function returns 1 if the aggregated constraint value has changed, 0 | ||
165 | * otherwise. | ||
166 | */ | ||
167 | int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, | ||
168 | enum pm_qos_req_action action, int value) | ||
169 | { | ||
170 | unsigned long flags; | ||
171 | int prev_value, curr_value, new_value; | ||
172 | |||
173 | spin_lock_irqsave(&pm_qos_lock, flags); | ||
174 | prev_value = pm_qos_get_value(c); | ||
175 | if (value == PM_QOS_DEFAULT_VALUE) | ||
176 | new_value = c->default_value; | ||
177 | else | ||
178 | new_value = value; | ||
179 | |||
180 | switch (action) { | ||
181 | case PM_QOS_REMOVE_REQ: | ||
182 | plist_del(node, &c->list); | ||
183 | break; | ||
184 | case PM_QOS_UPDATE_REQ: | ||
185 | /* | ||
186 | * to change the list, we atomically remove, reinit | ||
187 | * with new value and add, then see if the extremal | ||
188 | * changed | ||
189 | */ | ||
190 | plist_del(node, &c->list); | ||
191 | case PM_QOS_ADD_REQ: | ||
192 | plist_node_init(node, new_value); | ||
193 | plist_add(node, &c->list); | ||
194 | break; | ||
195 | default: | ||
196 | /* no action */ | ||
197 | ; | ||
198 | } | ||
199 | |||
200 | curr_value = pm_qos_get_value(c); | ||
201 | pm_qos_set_value(c, curr_value); | ||
202 | |||
203 | spin_unlock_irqrestore(&pm_qos_lock, flags); | ||
204 | |||
205 | if (prev_value != curr_value) { | ||
206 | blocking_notifier_call_chain(c->notifiers, | ||
207 | (unsigned long)curr_value, | ||
208 | NULL); | ||
209 | return 1; | ||
210 | } else { | ||
211 | return 0; | ||
212 | } | ||
213 | } | ||
214 | |||
215 | /** | ||
216 | * pm_qos_flags_remove_req - Remove device PM QoS flags request. | ||
217 | * @pqf: Device PM QoS flags set to remove the request from. | ||
218 | * @req: Request to remove from the set. | ||
219 | */ | ||
220 | static void pm_qos_flags_remove_req(struct pm_qos_flags *pqf, | ||
221 | struct pm_qos_flags_request *req) | ||
222 | { | ||
223 | s32 val = 0; | ||
224 | |||
225 | list_del(&req->node); | ||
226 | list_for_each_entry(req, &pqf->list, node) | ||
227 | val |= req->flags; | ||
228 | |||
229 | pqf->effective_flags = val; | ||
230 | } | ||
231 | |||
232 | /** | ||
233 | * pm_qos_update_flags - Update a set of PM QoS flags. | ||
234 | * @pqf: Set of flags to update. | ||
235 | * @req: Request to add to the set, to modify, or to remove from the set. | ||
236 | * @action: Action to take on the set. | ||
237 | * @val: Value of the request to add or modify. | ||
238 | * | ||
239 | * Update the given set of PM QoS flags and call notifiers if the aggregate | ||
240 | * value has changed. Returns 1 if the aggregate constraint value has changed, | ||
241 | * 0 otherwise. | ||
242 | */ | ||
243 | bool pm_qos_update_flags(struct pm_qos_flags *pqf, | ||
244 | struct pm_qos_flags_request *req, | ||
245 | enum pm_qos_req_action action, s32 val) | ||
246 | { | ||
247 | unsigned long irqflags; | ||
248 | s32 prev_value, curr_value; | ||
249 | |||
250 | spin_lock_irqsave(&pm_qos_lock, irqflags); | ||
251 | |||
252 | prev_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags; | ||
253 | |||
254 | switch (action) { | ||
255 | case PM_QOS_REMOVE_REQ: | ||
256 | pm_qos_flags_remove_req(pqf, req); | ||
257 | break; | ||
258 | case PM_QOS_UPDATE_REQ: | ||
259 | pm_qos_flags_remove_req(pqf, req); | ||
260 | case PM_QOS_ADD_REQ: | ||
261 | req->flags = val; | ||
262 | INIT_LIST_HEAD(&req->node); | ||
263 | list_add_tail(&req->node, &pqf->list); | ||
264 | pqf->effective_flags |= val; | ||
265 | break; | ||
266 | default: | ||
267 | /* no action */ | ||
268 | ; | ||
269 | } | ||
270 | |||
271 | curr_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags; | ||
272 | |||
273 | spin_unlock_irqrestore(&pm_qos_lock, irqflags); | ||
274 | |||
275 | return prev_value != curr_value; | ||
276 | } | ||
277 | |||
278 | /** | ||
279 | * pm_qos_request - returns current system wide qos expectation | ||
280 | * @pm_qos_class: identification of which qos value is requested | ||
281 | * | ||
282 | * This function returns the current target value. | ||
283 | */ | ||
284 | int pm_qos_request(int pm_qos_class) | ||
285 | { | ||
286 | return pm_qos_read_value(pm_qos_array[pm_qos_class]->constraints); | ||
287 | } | ||
288 | EXPORT_SYMBOL_GPL(pm_qos_request); | ||
289 | |||
290 | int pm_qos_request_active(struct pm_qos_request *req) | ||
291 | { | ||
292 | return req->pm_qos_class != 0; | ||
293 | } | ||
294 | EXPORT_SYMBOL_GPL(pm_qos_request_active); | ||
295 | |||
296 | /** | ||
297 | * pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout | ||
298 | * @work: work struct for the delayed work (timeout) | ||
299 | * | ||
300 | * This cancels the timeout request by falling back to the default at timeout. | ||
301 | */ | ||
302 | static void pm_qos_work_fn(struct work_struct *work) | ||
303 | { | ||
304 | struct pm_qos_request *req = container_of(to_delayed_work(work), | ||
305 | struct pm_qos_request, | ||
306 | work); | ||
307 | |||
308 | pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE); | ||
309 | } | ||
310 | |||
311 | /** | ||
312 | * pm_qos_add_request - inserts new qos request into the list | ||
313 | * @req: pointer to a preallocated handle | ||
314 | * @pm_qos_class: identifies which list of qos request to use | ||
315 | * @value: defines the qos request | ||
316 | * | ||
317 | * This function inserts a new entry in the pm_qos_class list of requested qos | ||
318 | * performance characteristics. It recomputes the aggregate QoS expectations | ||
319 | * for the pm_qos_class of parameters and initializes the pm_qos_request | ||
320 | * handle. Caller needs to save this handle for later use in updates and | ||
321 | * removal. | ||
322 | */ | ||
323 | |||
324 | void pm_qos_add_request(struct pm_qos_request *req, | ||
325 | int pm_qos_class, s32 value) | ||
326 | { | ||
327 | if (!req) /*guard against callers passing in null */ | ||
328 | return; | ||
329 | |||
330 | if (pm_qos_request_active(req)) { | ||
331 | WARN(1, KERN_ERR "pm_qos_add_request() called for already added request\n"); | ||
332 | return; | ||
333 | } | ||
334 | req->pm_qos_class = pm_qos_class; | ||
335 | INIT_DELAYED_WORK(&req->work, pm_qos_work_fn); | ||
336 | pm_qos_update_target(pm_qos_array[pm_qos_class]->constraints, | ||
337 | &req->node, PM_QOS_ADD_REQ, value); | ||
338 | } | ||
339 | EXPORT_SYMBOL_GPL(pm_qos_add_request); | ||
340 | |||
341 | /** | ||
342 | * pm_qos_update_request - modifies an existing qos request | ||
343 | * @req : handle to list element holding a pm_qos request to use | ||
344 | * @value: defines the qos request | ||
345 | * | ||
346 | * Updates an existing qos request for the pm_qos_class of parameters along | ||
347 | * with updating the target pm_qos_class value. | ||
348 | * | ||
349 | * Attempts are made to make this code callable on hot code paths. | ||
350 | */ | ||
351 | void pm_qos_update_request(struct pm_qos_request *req, | ||
352 | s32 new_value) | ||
353 | { | ||
354 | if (!req) /*guard against callers passing in null */ | ||
355 | return; | ||
356 | |||
357 | if (!pm_qos_request_active(req)) { | ||
358 | WARN(1, KERN_ERR "pm_qos_update_request() called for unknown object\n"); | ||
359 | return; | ||
360 | } | ||
361 | |||
362 | if (delayed_work_pending(&req->work)) | ||
363 | cancel_delayed_work_sync(&req->work); | ||
364 | |||
365 | if (new_value != req->node.prio) | ||
366 | pm_qos_update_target( | ||
367 | pm_qos_array[req->pm_qos_class]->constraints, | ||
368 | &req->node, PM_QOS_UPDATE_REQ, new_value); | ||
369 | } | ||
370 | EXPORT_SYMBOL_GPL(pm_qos_update_request); | ||
371 | |||
372 | /** | ||
373 | * pm_qos_update_request_timeout - modifies an existing qos request temporarily. | ||
374 | * @req : handle to list element holding a pm_qos request to use | ||
375 | * @new_value: defines the temporary qos request | ||
376 | * @timeout_us: the effective duration of this qos request in usecs. | ||
377 | * | ||
378 | * After timeout_us, this qos request is cancelled automatically. | ||
379 | */ | ||
380 | void pm_qos_update_request_timeout(struct pm_qos_request *req, s32 new_value, | ||
381 | unsigned long timeout_us) | ||
382 | { | ||
383 | if (!req) | ||
384 | return; | ||
385 | if (WARN(!pm_qos_request_active(req), | ||
386 | "%s called for unknown object.", __func__)) | ||
387 | return; | ||
388 | |||
389 | if (delayed_work_pending(&req->work)) | ||
390 | cancel_delayed_work_sync(&req->work); | ||
391 | |||
392 | if (new_value != req->node.prio) | ||
393 | pm_qos_update_target( | ||
394 | pm_qos_array[req->pm_qos_class]->constraints, | ||
395 | &req->node, PM_QOS_UPDATE_REQ, new_value); | ||
396 | |||
397 | schedule_delayed_work(&req->work, usecs_to_jiffies(timeout_us)); | ||
398 | } | ||
399 | |||
400 | /** | ||
401 | * pm_qos_remove_request - modifies an existing qos request | ||
402 | * @req: handle to request list element | ||
403 | * | ||
404 | * Will remove pm qos request from the list of constraints and | ||
405 | * recompute the current target value for the pm_qos_class. Call this | ||
406 | * on slow code paths. | ||
407 | */ | ||
408 | void pm_qos_remove_request(struct pm_qos_request *req) | ||
409 | { | ||
410 | if (!req) /*guard against callers passing in null */ | ||
411 | return; | ||
412 | /* silent return to keep pcm code cleaner */ | ||
413 | |||
414 | if (!pm_qos_request_active(req)) { | ||
415 | WARN(1, KERN_ERR "pm_qos_remove_request() called for unknown object\n"); | ||
416 | return; | ||
417 | } | ||
418 | |||
419 | if (delayed_work_pending(&req->work)) | ||
420 | cancel_delayed_work_sync(&req->work); | ||
421 | |||
422 | pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints, | ||
423 | &req->node, PM_QOS_REMOVE_REQ, | ||
424 | PM_QOS_DEFAULT_VALUE); | ||
425 | memset(req, 0, sizeof(*req)); | ||
426 | } | ||
427 | EXPORT_SYMBOL_GPL(pm_qos_remove_request); | ||
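The kernel-facing half of the file being deleted is the add/update/remove triple just above. A typical in-kernel consumer keeps one pm_qos_request object alive for as long as the constraint matters; a sketch of that lifecycle (the 100 us bound and the example_* names are illustrative, not from this patch):

    #include <linux/pm_qos.h>

    static struct pm_qos_request example_dma_req;

    static void example_start(void)
    {
            /* Cap CPU DMA latency at 100 us while the device is active. */
            pm_qos_add_request(&example_dma_req, PM_QOS_CPU_DMA_LATENCY, 100);
    }

    static void example_idle(void)
    {
            /* Relax the bound without dropping the request. */
            pm_qos_update_request(&example_dma_req, PM_QOS_DEFAULT_VALUE);
    }

    static void example_stop(void)
    {
            pm_qos_remove_request(&example_dma_req);
    }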
428 | |||
429 | /** | ||
430 | * pm_qos_add_notifier - sets notification entry for changes to target value | ||
431 | * @pm_qos_class: identifies which qos target changes should be notified. | ||
432 | * @notifier: notifier block managed by caller. | ||
433 | * | ||
434 | * will register the notifier into a notification chain that gets called | ||
435 | * upon changes to the pm_qos_class target value. | ||
436 | */ | ||
437 | int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier) | ||
438 | { | ||
439 | int retval; | ||
440 | |||
441 | retval = blocking_notifier_chain_register( | ||
442 | pm_qos_array[pm_qos_class]->constraints->notifiers, | ||
443 | notifier); | ||
444 | |||
445 | return retval; | ||
446 | } | ||
447 | EXPORT_SYMBOL_GPL(pm_qos_add_notifier); | ||
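Watchers use the notifier half: register a notifier_block against a QoS class and the chain fires with the new aggregate target whenever it changes. A sketch, assuming a hypothetical consumer that just logs the value:

    #include <linux/notifier.h>
    #include <linux/pm_qos.h>

    static int example_qos_notify(struct notifier_block *nb,
                                  unsigned long value, void *data)
    {
            pr_info("cpu_dma_latency target is now %lu usec\n", value);
            return NOTIFY_OK;
    }

    static struct notifier_block example_qos_nb = {
            .notifier_call = example_qos_notify,
    };

    static int __init example_init(void)
    {
            return pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, &example_qos_nb);
    }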
448 | |||
449 | /** | ||
450 | * pm_qos_remove_notifier - deletes notification entry from chain. | ||
451 | * @pm_qos_class: identifies which qos target changes are notified. | ||
452 | * @notifier: notifier block to be removed. | ||
453 | * | ||
454 | * will remove the notifier from the notification chain that gets called | ||
455 | * upon changes to the pm_qos_class target value. | ||
456 | */ | ||
457 | int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier) | ||
458 | { | ||
459 | int retval; | ||
460 | |||
461 | retval = blocking_notifier_chain_unregister( | ||
462 | pm_qos_array[pm_qos_class]->constraints->notifiers, | ||
463 | notifier); | ||
464 | |||
465 | return retval; | ||
466 | } | ||
467 | EXPORT_SYMBOL_GPL(pm_qos_remove_notifier); | ||
468 | |||
469 | /* User space interface to PM QoS classes via misc devices */ | ||
470 | static int register_pm_qos_misc(struct pm_qos_object *qos) | ||
471 | { | ||
472 | qos->pm_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR; | ||
473 | qos->pm_qos_power_miscdev.name = qos->name; | ||
474 | qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops; | ||
475 | |||
476 | return misc_register(&qos->pm_qos_power_miscdev); | ||
477 | } | ||
478 | |||
479 | static int find_pm_qos_object_by_minor(int minor) | ||
480 | { | ||
481 | int pm_qos_class; | ||
482 | |||
483 | for (pm_qos_class = 0; | ||
484 | pm_qos_class < PM_QOS_NUM_CLASSES; pm_qos_class++) { | ||
485 | if (minor == | ||
486 | pm_qos_array[pm_qos_class]->pm_qos_power_miscdev.minor) | ||
487 | return pm_qos_class; | ||
488 | } | ||
489 | return -1; | ||
490 | } | ||
491 | |||
492 | static int pm_qos_power_open(struct inode *inode, struct file *filp) | ||
493 | { | ||
494 | long pm_qos_class; | ||
495 | |||
496 | pm_qos_class = find_pm_qos_object_by_minor(iminor(inode)); | ||
497 | if (pm_qos_class >= 0) { | ||
498 | struct pm_qos_request *req = kzalloc(sizeof(*req), GFP_KERNEL); | ||
499 | if (!req) | ||
500 | return -ENOMEM; | ||
501 | |||
502 | pm_qos_add_request(req, pm_qos_class, PM_QOS_DEFAULT_VALUE); | ||
503 | filp->private_data = req; | ||
504 | |||
505 | return 0; | ||
506 | } | ||
507 | return -EPERM; | ||
508 | } | ||
509 | |||
510 | static int pm_qos_power_release(struct inode *inode, struct file *filp) | ||
511 | { | ||
512 | struct pm_qos_request *req; | ||
513 | |||
514 | req = filp->private_data; | ||
515 | pm_qos_remove_request(req); | ||
516 | kfree(req); | ||
517 | |||
518 | return 0; | ||
519 | } | ||
520 | |||
521 | |||
522 | static ssize_t pm_qos_power_read(struct file *filp, char __user *buf, | ||
523 | size_t count, loff_t *f_pos) | ||
524 | { | ||
525 | s32 value; | ||
526 | unsigned long flags; | ||
527 | struct pm_qos_request *req = filp->private_data; | ||
528 | |||
529 | if (!req) | ||
530 | return -EINVAL; | ||
531 | if (!pm_qos_request_active(req)) | ||
532 | return -EINVAL; | ||
533 | |||
534 | spin_lock_irqsave(&pm_qos_lock, flags); | ||
535 | value = pm_qos_get_value(pm_qos_array[req->pm_qos_class]->constraints); | ||
536 | spin_unlock_irqrestore(&pm_qos_lock, flags); | ||
537 | |||
538 | return simple_read_from_buffer(buf, count, f_pos, &value, sizeof(s32)); | ||
539 | } | ||
540 | |||
541 | static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf, | ||
542 | size_t count, loff_t *f_pos) | ||
543 | { | ||
544 | s32 value; | ||
545 | struct pm_qos_request *req; | ||
546 | |||
547 | if (count == sizeof(s32)) { | ||
548 | if (copy_from_user(&value, buf, sizeof(s32))) | ||
549 | return -EFAULT; | ||
550 | } else if (count <= 11) { /* ASCII perhaps? */ | ||
551 | char ascii_value[11]; | ||
552 | unsigned long int ulval; | ||
553 | int ret; | ||
554 | |||
555 | if (copy_from_user(ascii_value, buf, count)) | ||
556 | return -EFAULT; | ||
557 | |||
558 | if (count > 10) { | ||
559 | if (ascii_value[10] == '\n') | ||
560 | ascii_value[10] = '\0'; | ||
561 | else | ||
562 | return -EINVAL; | ||
563 | } else { | ||
564 | ascii_value[count] = '\0'; | ||
565 | } | ||
566 | ret = kstrtoul(ascii_value, 16, &ulval); | ||
567 | if (ret) { | ||
568 | pr_debug("%s, 0x%lx, 0x%x\n", ascii_value, ulval, ret); | ||
569 | return -EINVAL; | ||
570 | } | ||
571 | value = (s32)lower_32_bits(ulval); | ||
572 | } else { | ||
573 | return -EINVAL; | ||
574 | } | ||
575 | |||
576 | req = filp->private_data; | ||
577 | pm_qos_update_request(req, value); | ||
578 | |||
579 | return count; | ||
580 | } | ||
581 | |||
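pm_qos_power_write() accepts either a raw s32 of exactly sizeof(s32) bytes or a short ASCII hex string, and pm_qos_power_open() ties the resulting request to the file descriptor, so the constraint is dropped automatically on close or process exit. A user-space sketch against the /dev/cpu_dma_latency node registered by this file (the bound is an arbitrary example):

    /* Keep the fd open for as long as the latency bound must hold. */
    #include <fcntl.h>
    #include <stdint.h>
    #include <unistd.h>

    int main(void)
    {
            int32_t max_latency_us = 100;       /* example bound, in usec */
            int fd = open("/dev/cpu_dma_latency", O_RDWR);
            if (fd < 0)
                    return 1;
            write(fd, &max_latency_us, sizeof(max_latency_us));

            /* ... latency-sensitive work; closing fd lifts the constraint */
            pause();
            return 0;
    }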
582 | |||
583 | static int __init pm_qos_power_init(void) | ||
584 | { | ||
585 | int ret = 0; | ||
586 | int i; | ||
587 | |||
588 | BUILD_BUG_ON(ARRAY_SIZE(pm_qos_array) != PM_QOS_NUM_CLASSES); | ||
589 | |||
590 | for (i = 1; i < PM_QOS_NUM_CLASSES; i++) { | ||
591 | ret = register_pm_qos_misc(pm_qos_array[i]); | ||
592 | if (ret < 0) { | ||
593 | printk(KERN_ERR "pm_qos_param: %s setup failed\n", | ||
594 | pm_qos_array[i]->name); | ||
595 | return ret; | ||
596 | } | ||
597 | } | ||
598 | |||
599 | return ret; | ||
600 | } | ||
601 | |||
602 | late_initcall(pm_qos_power_init); | ||
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 0de28576807..06efa54f93d 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c | |||
@@ -711,10 +711,9 @@ static void mark_nosave_pages(struct memory_bitmap *bm) | |||
711 | list_for_each_entry(region, &nosave_regions, list) { | 711 | list_for_each_entry(region, &nosave_regions, list) { |
712 | unsigned long pfn; | 712 | unsigned long pfn; |
713 | 713 | ||
714 | pr_debug("PM: Marking nosave pages: [mem %#010llx-%#010llx]\n", | 714 | pr_debug("PM: Marking nosave pages: %016lx - %016lx\n", |
715 | (unsigned long long) region->start_pfn << PAGE_SHIFT, | 715 | region->start_pfn << PAGE_SHIFT, |
716 | ((unsigned long long) region->end_pfn << PAGE_SHIFT) | 716 | region->end_pfn << PAGE_SHIFT); |
717 | - 1); | ||
718 | 717 | ||
719 | for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++) | 718 | for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++) |
720 | if (pfn_valid(pfn)) { | 719 | if (pfn_valid(pfn)) { |
@@ -813,8 +812,7 @@ unsigned int snapshot_additional_pages(struct zone *zone) | |||
813 | unsigned int res; | 812 | unsigned int res; |
814 | 813 | ||
815 | res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK); | 814 | res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK); |
816 | res += DIV_ROUND_UP(res * sizeof(struct bm_block), | 815 | res += DIV_ROUND_UP(res * sizeof(struct bm_block), PAGE_SIZE); |
817 | LINKED_PAGE_DATA_SIZE); | ||
818 | return 2 * res; | 816 | return 2 * res; |
819 | } | 817 | } |
820 | 818 | ||
@@ -860,9 +858,6 @@ static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn) | |||
860 | PageReserved(page)) | 858 | PageReserved(page)) |
861 | return NULL; | 859 | return NULL; |
862 | 860 | ||
863 | if (page_is_guard(page)) | ||
864 | return NULL; | ||
865 | |||
866 | return page; | 861 | return page; |
867 | } | 862 | } |
868 | 863 | ||
@@ -925,9 +920,6 @@ static struct page *saveable_page(struct zone *zone, unsigned long pfn) | |||
925 | && (!kernel_page_present(page) || pfn_is_nosave(pfn))) | 920 | && (!kernel_page_present(page) || pfn_is_nosave(pfn))) |
926 | return NULL; | 921 | return NULL; |
927 | 922 | ||
928 | if (page_is_guard(page)) | ||
929 | return NULL; | ||
930 | |||
931 | return page; | 923 | return page; |
932 | } | 924 | } |
933 | 925 | ||
@@ -1001,20 +993,20 @@ static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn) | |||
1001 | s_page = pfn_to_page(src_pfn); | 993 | s_page = pfn_to_page(src_pfn); |
1002 | d_page = pfn_to_page(dst_pfn); | 994 | d_page = pfn_to_page(dst_pfn); |
1003 | if (PageHighMem(s_page)) { | 995 | if (PageHighMem(s_page)) { |
1004 | src = kmap_atomic(s_page); | 996 | src = kmap_atomic(s_page, KM_USER0); |
1005 | dst = kmap_atomic(d_page); | 997 | dst = kmap_atomic(d_page, KM_USER1); |
1006 | do_copy_page(dst, src); | 998 | do_copy_page(dst, src); |
1007 | kunmap_atomic(dst); | 999 | kunmap_atomic(dst, KM_USER1); |
1008 | kunmap_atomic(src); | 1000 | kunmap_atomic(src, KM_USER0); |
1009 | } else { | 1001 | } else { |
1010 | if (PageHighMem(d_page)) { | 1002 | if (PageHighMem(d_page)) { |
1011 | /* Page pointed to by src may contain some kernel | 1003 | /* Page pointed to by src may contain some kernel |
1012 | * data modified by kmap_atomic() | 1004 | * data modified by kmap_atomic() |
1013 | */ | 1005 | */ |
1014 | safe_copy_page(buffer, s_page); | 1006 | safe_copy_page(buffer, s_page); |
1015 | dst = kmap_atomic(d_page); | 1007 | dst = kmap_atomic(d_page, KM_USER0); |
1016 | copy_page(dst, buffer); | 1008 | copy_page(dst, buffer); |
1017 | kunmap_atomic(dst); | 1009 | kunmap_atomic(dst, KM_USER0); |
1018 | } else { | 1010 | } else { |
1019 | safe_copy_page(page_address(d_page), s_page); | 1011 | safe_copy_page(page_address(d_page), s_page); |
1020 | } | 1012 | } |
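The snapshot.c churn in this hunk is purely the kmap_atomic() API: mainline dropped the explicit KM_* slot argument once atomic kmaps became stack-based, and this revert reinstates the two-argument form. The two spellings side by side (map and unmap calls must always match within a pair):

    /* Newer mainline API: slots are implicit. */
    void *dst = kmap_atomic(d_page);
    do_copy_page(dst, src);
    kunmap_atomic(dst);

    /* Older API this tree reverts to: the caller names the slot. */
    void *dst2 = kmap_atomic(d_page, KM_USER0);
    do_copy_page(dst2, src);
    kunmap_atomic(dst2, KM_USER0);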
@@ -1347,9 +1339,6 @@ int hibernate_preallocate_memory(void) | |||
1347 | count += highmem; | 1339 | count += highmem; |
1348 | count -= totalreserve_pages; | 1340 | count -= totalreserve_pages; |
1349 | 1341 | ||
1350 | /* Add number of pages required for page keys (s390 only). */ | ||
1351 | size += page_key_additional_pages(saveable); | ||
1352 | |||
1353 | /* Compute the maximum number of saveable pages to leave in memory. */ | 1342 | /* Compute the maximum number of saveable pages to leave in memory. */ |
1354 | max_size = (count - (size + PAGES_FOR_IO)) / 2 | 1343 | max_size = (count - (size + PAGES_FOR_IO)) / 2 |
1355 | - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE); | 1344 | - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE); |
@@ -1673,8 +1662,6 @@ pack_pfns(unsigned long *buf, struct memory_bitmap *bm) | |||
1673 | buf[j] = memory_bm_next_pfn(bm); | 1662 | buf[j] = memory_bm_next_pfn(bm); |
1674 | if (unlikely(buf[j] == BM_END_OF_MAP)) | 1663 | if (unlikely(buf[j] == BM_END_OF_MAP)) |
1675 | break; | 1664 | break; |
1676 | /* Save page key for data page (s390 only). */ | ||
1677 | page_key_read(buf + j); | ||
1678 | } | 1665 | } |
1679 | } | 1666 | } |
1680 | 1667 | ||
@@ -1729,9 +1716,9 @@ int snapshot_read_next(struct snapshot_handle *handle) | |||
1729 | */ | 1716 | */ |
1730 | void *kaddr; | 1717 | void *kaddr; |
1731 | 1718 | ||
1732 | kaddr = kmap_atomic(page); | 1719 | kaddr = kmap_atomic(page, KM_USER0); |
1733 | copy_page(buffer, kaddr); | 1720 | copy_page(buffer, kaddr); |
1734 | kunmap_atomic(kaddr); | 1721 | kunmap_atomic(kaddr, KM_USER0); |
1735 | handle->buffer = buffer; | 1722 | handle->buffer = buffer; |
1736 | } else { | 1723 | } else { |
1737 | handle->buffer = page_address(page); | 1724 | handle->buffer = page_address(page); |
@@ -1834,9 +1821,6 @@ static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm) | |||
1834 | if (unlikely(buf[j] == BM_END_OF_MAP)) | 1821 | if (unlikely(buf[j] == BM_END_OF_MAP)) |
1835 | break; | 1822 | break; |
1836 | 1823 | ||
1837 | /* Extract and buffer page key for data page (s390 only). */ | ||
1838 | page_key_memorize(buf + j); | ||
1839 | |||
1840 | if (memory_bm_pfn_present(bm, buf[j])) | 1824 | if (memory_bm_pfn_present(bm, buf[j])) |
1841 | memory_bm_set_bit(bm, buf[j]); | 1825 | memory_bm_set_bit(bm, buf[j]); |
1842 | else | 1826 | else |
@@ -2015,9 +1999,9 @@ static void copy_last_highmem_page(void) | |||
2015 | if (last_highmem_page) { | 1999 | if (last_highmem_page) { |
2016 | void *dst; | 2000 | void *dst; |
2017 | 2001 | ||
2018 | dst = kmap_atomic(last_highmem_page); | 2002 | dst = kmap_atomic(last_highmem_page, KM_USER0); |
2019 | copy_page(dst, buffer); | 2003 | copy_page(dst, buffer); |
2020 | kunmap_atomic(dst); | 2004 | kunmap_atomic(dst, KM_USER0); |
2021 | last_highmem_page = NULL; | 2005 | last_highmem_page = NULL; |
2022 | } | 2006 | } |
2023 | } | 2007 | } |
@@ -2239,11 +2223,6 @@ int snapshot_write_next(struct snapshot_handle *handle) | |||
2239 | if (error) | 2223 | if (error) |
2240 | return error; | 2224 | return error; |
2241 | 2225 | ||
2242 | /* Allocate buffer for page keys. */ | ||
2243 | error = page_key_alloc(nr_copy_pages); | ||
2244 | if (error) | ||
2245 | return error; | ||
2246 | |||
2247 | } else if (handle->cur <= nr_meta_pages + 1) { | 2226 | } else if (handle->cur <= nr_meta_pages + 1) { |
2248 | error = unpack_orig_pfns(buffer, ©_bm); | 2227 | error = unpack_orig_pfns(buffer, ©_bm); |
2249 | if (error) | 2228 | if (error) |
@@ -2264,8 +2243,6 @@ int snapshot_write_next(struct snapshot_handle *handle) | |||
2264 | } | 2243 | } |
2265 | } else { | 2244 | } else { |
2266 | copy_last_highmem_page(); | 2245 | copy_last_highmem_page(); |
2267 | /* Restore page key for data page (s390 only). */ | ||
2268 | page_key_write(handle->buffer); | ||
2269 | handle->buffer = get_buffer(&orig_bm, &ca); | 2246 | handle->buffer = get_buffer(&orig_bm, &ca); |
2270 | if (IS_ERR(handle->buffer)) | 2247 | if (IS_ERR(handle->buffer)) |
2271 | return PTR_ERR(handle->buffer); | 2248 | return PTR_ERR(handle->buffer); |
@@ -2287,9 +2264,6 @@ int snapshot_write_next(struct snapshot_handle *handle) | |||
2287 | void snapshot_write_finalize(struct snapshot_handle *handle) | 2264 | void snapshot_write_finalize(struct snapshot_handle *handle) |
2288 | { | 2265 | { |
2289 | copy_last_highmem_page(); | 2266 | copy_last_highmem_page(); |
2290 | /* Restore page key for data page (s390 only). */ | ||
2291 | page_key_write(handle->buffer); | ||
2292 | page_key_free(); | ||
2293 | /* Free only if we have loaded the image entirely */ | 2267 | /* Free only if we have loaded the image entirely */ |
2294 | if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) { | 2268 | if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) { |
2295 | memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR); | 2269 | memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR); |
@@ -2310,13 +2284,13 @@ swap_two_pages_data(struct page *p1, struct page *p2, void *buf) | |||
2310 | { | 2284 | { |
2311 | void *kaddr1, *kaddr2; | 2285 | void *kaddr1, *kaddr2; |
2312 | 2286 | ||
2313 | kaddr1 = kmap_atomic(p1); | 2287 | kaddr1 = kmap_atomic(p1, KM_USER0); |
2314 | kaddr2 = kmap_atomic(p2); | 2288 | kaddr2 = kmap_atomic(p2, KM_USER1); |
2315 | copy_page(buf, kaddr1); | 2289 | copy_page(buf, kaddr1); |
2316 | copy_page(kaddr1, kaddr2); | 2290 | copy_page(kaddr1, kaddr2); |
2317 | copy_page(kaddr2, buf); | 2291 | copy_page(kaddr2, buf); |
2318 | kunmap_atomic(kaddr2); | 2292 | kunmap_atomic(kaddr2, KM_USER1); |
2319 | kunmap_atomic(kaddr1); | 2293 | kunmap_atomic(kaddr1, KM_USER0); |
2320 | } | 2294 | } |
2321 | 2295 | ||
2322 | /** | 2296 | /** |
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index c8b7446b27d..a6f6e3114a2 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c | |||
@@ -21,15 +21,16 @@ | |||
21 | #include <linux/list.h> | 21 | #include <linux/list.h> |
22 | #include <linux/mm.h> | 22 | #include <linux/mm.h> |
23 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
24 | #include <linux/export.h> | ||
25 | #include <linux/suspend.h> | 24 | #include <linux/suspend.h> |
26 | #include <linux/syscore_ops.h> | 25 | #include <linux/syscore_ops.h> |
27 | #include <linux/ftrace.h> | ||
28 | #include <trace/events/power.h> | 26 | #include <trace/events/power.h> |
29 | 27 | ||
30 | #include "power.h" | 28 | #include "power.h" |
31 | 29 | ||
32 | const char *const pm_states[PM_SUSPEND_MAX] = { | 30 | const char *const pm_states[PM_SUSPEND_MAX] = { |
31 | #ifdef CONFIG_EARLYSUSPEND | ||
32 | [PM_SUSPEND_ON] = "on", | ||
33 | #endif | ||
33 | [PM_SUSPEND_STANDBY] = "standby", | 34 | [PM_SUSPEND_STANDBY] = "standby", |
34 | [PM_SUSPEND_MEM] = "mem", | 35 | [PM_SUSPEND_MEM] = "mem", |
35 | }; | 36 | }; |
@@ -37,14 +38,14 @@ const char *const pm_states[PM_SUSPEND_MAX] = { | |||
37 | static const struct platform_suspend_ops *suspend_ops; | 38 | static const struct platform_suspend_ops *suspend_ops; |
38 | 39 | ||
39 | /** | 40 | /** |
40 | * suspend_set_ops - Set the global suspend method table. | 41 | * suspend_set_ops - Set the global suspend method table. |
41 | * @ops: Suspend operations to use. | 42 | * @ops: Pointer to ops structure. |
42 | */ | 43 | */ |
43 | void suspend_set_ops(const struct platform_suspend_ops *ops) | 44 | void suspend_set_ops(const struct platform_suspend_ops *ops) |
44 | { | 45 | { |
45 | lock_system_sleep(); | 46 | mutex_lock(&pm_mutex); |
46 | suspend_ops = ops; | 47 | suspend_ops = ops; |
47 | unlock_system_sleep(); | 48 | mutex_unlock(&pm_mutex); |
48 | } | 49 | } |
49 | EXPORT_SYMBOL_GPL(suspend_set_ops); | 50 | EXPORT_SYMBOL_GPL(suspend_set_ops); |
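Both variants serialize against the same sleep-wide lock while swapping the single global suspend_ops pointer; a platform registers its callbacks once during init. A sketch of a typical registration (the tegra_* names here are hypothetical, chosen only to match the commit's subject):

/* Hypothetical SoC code registering its suspend callbacks. Only
 * .enter is required; .valid often reuses the generic
 * suspend_valid_only_mem() helper documented just below. */
static int tegra_suspend_enter(suspend_state_t state)
{
	/* program the power controller, execute WFI, resume here */
	return 0;
}

static const struct platform_suspend_ops tegra_suspend_ops = {
	.valid	= suspend_valid_only_mem,
	.enter	= tegra_suspend_enter,
};

static int __init tegra_pm_init(void)
{
	suspend_set_ops(&tegra_suspend_ops);
	return 0;
}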
50 | 51 | ||
@@ -58,11 +59,11 @@ bool valid_state(suspend_state_t state) | |||
58 | } | 59 | } |
59 | 60 | ||
60 | /** | 61 | /** |
61 | * suspend_valid_only_mem - Generic memory-only valid callback. | 62 | * suspend_valid_only_mem - generic memory-only valid callback |
62 | * | 63 | * |
63 | * Platform drivers that implement mem suspend only and only need to check for | 64 | * Platform drivers that implement mem suspend only and only need |
64 | * that in their .valid() callback can use this instead of rolling their own | 65 | * to check for that in their .valid callback can use this instead |
65 | * .valid() callback. | 66 | * of rolling their own .valid callback. |
66 | */ | 67 | */ |
67 | int suspend_valid_only_mem(suspend_state_t state) | 68 | int suspend_valid_only_mem(suspend_state_t state) |
68 | { | 69 | { |
@@ -83,11 +84,10 @@ static int suspend_test(int level) | |||
83 | } | 84 | } |
84 | 85 | ||
85 | /** | 86 | /** |
86 | * suspend_prepare - Prepare for entering system sleep state. | 87 | * suspend_prepare - Do prep work before entering low-power state. |
87 | * | 88 | * |
88 | * Common code run for every system sleep state that can be entered (except for | 89 | * This is common code that is called for each state that we're entering. |
89 | * hibernation). Run suspend notifiers, allocate the "suspend" console and | 90 | * Run suspend notifiers, allocate a console and stop all processes. |
90 | * freeze processes. | ||
91 | */ | 91 | */ |
92 | static int suspend_prepare(void) | 92 | static int suspend_prepare(void) |
93 | { | 93 | { |
@@ -102,12 +102,16 @@ static int suspend_prepare(void) | |||
102 | if (error) | 102 | if (error) |
103 | goto Finish; | 103 | goto Finish; |
104 | 104 | ||
105 | error = usermodehelper_disable(); | ||
106 | if (error) | ||
107 | goto Finish; | ||
108 | |||
105 | error = suspend_freeze_processes(); | 109 | error = suspend_freeze_processes(); |
106 | if (!error) | 110 | if (!error) |
107 | return 0; | 111 | return 0; |
108 | 112 | ||
109 | suspend_stats.failed_freeze++; | 113 | suspend_thaw_processes(); |
110 | dpm_save_failed_step(SUSPEND_FREEZE); | 114 | usermodehelper_enable(); |
111 | Finish: | 115 | Finish: |
112 | pm_notifier_call_chain(PM_POST_SUSPEND); | 116 | pm_notifier_call_chain(PM_POST_SUSPEND); |
113 | pm_restore_console(); | 117 | pm_restore_console(); |
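The restored sequence inserts usermodehelper_disable() before freezing and, on freeze failure, unwinds by thawing and re-enabling helpers instead of recording suspend statistics. A sketch of that prepare/unwind ordering, assuming the helper names used in this hunk:

/* Illustrative ordering for suspend_prepare(): each successful step
 * is undone in reverse when a later one fails, finishing with the
 * PM_POST_SUSPEND notifier and console restore. */
static int suspend_prepare_sketch(void)
{
	int error;

	error = usermodehelper_disable();	/* no /sbin helpers from now on */
	if (error)
		goto notify;

	error = suspend_freeze_processes();	/* freeze tasks */
	if (!error)
		return 0;			/* proceed to device suspend */

	suspend_thaw_processes();		/* undo a partial freeze */
	usermodehelper_enable();
notify:
	pm_notifier_call_chain(PM_POST_SUSPEND);
	pm_restore_console();
	return error;
}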
@@ -127,9 +131,9 @@ void __attribute__ ((weak)) arch_suspend_enable_irqs(void) | |||
127 | } | 131 | } |
128 | 132 | ||
129 | /** | 133 | /** |
130 | * suspend_enter - Make the system enter the given sleep state. | 134 | * suspend_enter - enter the desired system sleep state. |
131 | * @state: System sleep state to enter. | 135 | * @state: State to enter |
132 | * @wakeup: Returns information that the sleep state should not be re-entered. | 136 | * @wakeup: Returns information that suspend should not be entered again. |
133 | * | 137 | * |
134 | * This function should be called after devices have been suspended. | 138 | * This function should be called after devices have been suspended. |
135 | */ | 139 | */ |
@@ -143,7 +147,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup) | |||
143 | goto Platform_finish; | 147 | goto Platform_finish; |
144 | } | 148 | } |
145 | 149 | ||
146 | error = dpm_suspend_end(PMSG_SUSPEND); | 150 | error = dpm_suspend_noirq(PMSG_SUSPEND); |
147 | if (error) { | 151 | if (error) { |
148 | printk(KERN_ERR "PM: Some devices failed to power down\n"); | 152 | printk(KERN_ERR "PM: Some devices failed to power down\n"); |
149 | goto Platform_finish; | 153 | goto Platform_finish; |
@@ -185,7 +189,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup) | |||
185 | if (suspend_ops->wake) | 189 | if (suspend_ops->wake) |
186 | suspend_ops->wake(); | 190 | suspend_ops->wake(); |
187 | 191 | ||
188 | dpm_resume_start(PMSG_RESUME); | 192 | dpm_resume_noirq(PMSG_RESUME); |
189 | 193 | ||
190 | Platform_finish: | 194 | Platform_finish: |
191 | if (suspend_ops->finish) | 195 | if (suspend_ops->finish) |
@@ -195,8 +199,9 @@ static int suspend_enter(suspend_state_t state, bool *wakeup) | |||
195 | } | 199 | } |
196 | 200 | ||
197 | /** | 201 | /** |
198 | * suspend_devices_and_enter - Suspend devices and enter system sleep state. | 202 | * suspend_devices_and_enter - suspend devices and enter the desired system |
199 | * @state: System sleep state to enter. | 203 | * sleep state. |
204 | * @state: state to enter | ||
200 | */ | 205 | */ |
201 | int suspend_devices_and_enter(suspend_state_t state) | 206 | int suspend_devices_and_enter(suspend_state_t state) |
202 | { | 207 | { |
@@ -213,7 +218,6 @@ int suspend_devices_and_enter(suspend_state_t state) | |||
213 | goto Close; | 218 | goto Close; |
214 | } | 219 | } |
215 | suspend_console(); | 220 | suspend_console(); |
216 | ftrace_stop(); | ||
217 | suspend_test_start(); | 221 | suspend_test_start(); |
218 | error = dpm_suspend_start(PMSG_SUSPEND); | 222 | error = dpm_suspend_start(PMSG_SUSPEND); |
219 | if (error) { | 223 | if (error) { |
@@ -233,7 +237,6 @@ int suspend_devices_and_enter(suspend_state_t state) | |||
233 | suspend_test_start(); | 237 | suspend_test_start(); |
234 | dpm_resume_end(PMSG_RESUME); | 238 | dpm_resume_end(PMSG_RESUME); |
235 | suspend_test_finish("resume devices"); | 239 | suspend_test_finish("resume devices"); |
236 | ftrace_start(); | ||
237 | resume_console(); | 240 | resume_console(); |
238 | Close: | 241 | Close: |
239 | if (suspend_ops->end) | 242 | if (suspend_ops->end) |
@@ -248,27 +251,30 @@ int suspend_devices_and_enter(suspend_state_t state) | |||
248 | } | 251 | } |
249 | 252 | ||
250 | /** | 253 | /** |
251 | * suspend_finish - Clean up before finishing the suspend sequence. | 254 | * suspend_finish - Do final work before exiting suspend sequence. |
252 | * | 255 | * |
253 | * Call platform code to clean up, restart processes, and free the console that | 256 | * Call platform code to clean up, restart processes, and free the |
254 | * we've allocated. This routine is not called for hibernation. | 257 | * console that we've allocated. This is not called for suspend-to-disk. |
255 | */ | 258 | */ |
256 | static void suspend_finish(void) | 259 | static void suspend_finish(void) |
257 | { | 260 | { |
258 | suspend_thaw_processes(); | 261 | suspend_thaw_processes(); |
262 | usermodehelper_enable(); | ||
259 | pm_notifier_call_chain(PM_POST_SUSPEND); | 263 | pm_notifier_call_chain(PM_POST_SUSPEND); |
260 | pm_restore_console(); | 264 | pm_restore_console(); |
261 | } | 265 | } |
262 | 266 | ||
263 | /** | 267 | /** |
264 | * enter_state - Do common work needed to enter system sleep state. | 268 | * enter_state - Do common work of entering low-power state. |
265 | * @state: System sleep state to enter. | 269 | * @state: pm_state structure for state we're entering. |
266 | * | 270 | * |
267 | * Make sure that no one else is trying to put the system into a sleep state. | 271 | * Make sure we're the only ones trying to enter a sleep state. Fail |
268 | * Fail if that's not the case. Otherwise, prepare for system suspend, make the | 272 | * if someone has beaten us to it, since we don't want anything weird to |
269 | * system enter the given sleep state and clean up after wakeup. | 273 | * happen when we wake up. |
274 | * Then, do the setup for suspend, enter the state, and clean up (after | ||
275 | * we've woken up). | ||
270 | */ | 276 | */ |
271 | static int enter_state(suspend_state_t state) | 277 | int enter_state(suspend_state_t state) |
272 | { | 278 | { |
273 | int error; | 279 | int error; |
274 | 280 | ||
@@ -304,26 +310,16 @@ static int enter_state(suspend_state_t state) | |||
304 | } | 310 | } |
305 | 311 | ||
306 | /** | 312 | /** |
307 | * pm_suspend - Externally visible function for suspending the system. | 313 | * pm_suspend - Externally visible function for suspending system. |
308 | * @state: System sleep state to enter. | 314 | * @state: Enumerated value of state to enter. |
309 | * | 315 | * |
310 | * Check if the value of @state represents one of the supported states, | 316 | * Determine whether or not value is within range, get state |
311 | * execute enter_state() and update system suspend statistics. | 317 | * structure, and enter (above). |
312 | */ | 318 | */ |
313 | int pm_suspend(suspend_state_t state) | 319 | int pm_suspend(suspend_state_t state) |
314 | { | 320 | { |
315 | int error; | 321 | if (state > PM_SUSPEND_ON && state < PM_SUSPEND_MAX) |
316 | 322 | return enter_state(state); | |
317 | if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX) | 323 | return -EINVAL; |
318 | return -EINVAL; | ||
319 | |||
320 | error = enter_state(state); | ||
321 | if (error) { | ||
322 | suspend_stats.fail++; | ||
323 | dpm_save_failed_errno(error); | ||
324 | } else { | ||
325 | suspend_stats.success++; | ||
326 | } | ||
327 | return error; | ||
328 | } | 324 | } |
329 | EXPORT_SYMBOL(pm_suspend); | 325 | EXPORT_SYMBOL(pm_suspend); |
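Both versions accept only values strictly between PM_SUSPEND_ON and PM_SUSPEND_MAX; the mainline side additionally updates suspend_stats on each attempt. The validity test itself reduces to an open-interval check on the enum, as sketched here:

/* suspend_state_t is a small enum (PM_SUSPEND_ON, then the real
 * sleep states, with PM_SUSPEND_MAX as a sentinel), so only values
 * in the open interval (ON, MAX) name an enterable state. */
static bool pm_suspend_state_valid(suspend_state_t state)
{
	return state > PM_SUSPEND_ON && state < PM_SUSPEND_MAX;
}

Callers such as the /sys/power/state handler rely on exactly this check before invoking enter_state().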
diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 7c33ed20041..7c97c3a0eee 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c | |||
@@ -6,7 +6,7 @@ | |||
6 | * | 6 | * |
7 | * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz> | 7 | * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz> |
8 | * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl> | 8 | * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl> |
9 | * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com> | 9 | * Copyright (C) 2010 Bojan Smojver <bojan@rexursive.com> |
10 | * | 10 | * |
11 | * This file is released under the GPLv2. | 11 | * This file is released under the GPLv2. |
12 | * | 12 | * |
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/bitops.h> | 18 | #include <linux/bitops.h> |
19 | #include <linux/genhd.h> | 19 | #include <linux/genhd.h> |
20 | #include <linux/device.h> | 20 | #include <linux/device.h> |
21 | #include <linux/buffer_head.h> | ||
21 | #include <linux/bio.h> | 22 | #include <linux/bio.h> |
22 | #include <linux/blkdev.h> | 23 | #include <linux/blkdev.h> |
23 | #include <linux/swap.h> | 24 | #include <linux/swap.h> |
@@ -26,10 +27,6 @@ | |||
26 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
27 | #include <linux/lzo.h> | 28 | #include <linux/lzo.h> |
28 | #include <linux/vmalloc.h> | 29 | #include <linux/vmalloc.h> |
29 | #include <linux/cpumask.h> | ||
30 | #include <linux/atomic.h> | ||
31 | #include <linux/kthread.h> | ||
32 | #include <linux/crc32.h> | ||
33 | 30 | ||
34 | #include "power.h" | 31 | #include "power.h" |
35 | 32 | ||
@@ -46,38 +43,17 @@ | |||
46 | * allocated and populated one at a time, so we only need one memory | 43 | * allocated and populated one at a time, so we only need one memory |
47 | * page to set up the entire structure. | 44 | * page to set up the entire structure. |
48 | * | 45 | * |
49 | * During resume we pick up all swap_map_page structures into a list. | 46 | * During resume we also only need to use one swap_map_page structure |
47 | * at a time. | ||
50 | */ | 48 | */ |
51 | 49 | ||
52 | #define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1) | 50 | #define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1) |
53 | 51 | ||
54 | /* | ||
55 | * Number of free pages that are not high. | ||
56 | */ | ||
57 | static inline unsigned long low_free_pages(void) | ||
58 | { | ||
59 | return nr_free_pages() - nr_free_highpages(); | ||
60 | } | ||
61 | |||
62 | /* | ||
63 | * Number of pages required to be kept free while writing the image. Always | ||
64 | * half of all available low pages before the writing starts. | ||
65 | */ | ||
66 | static inline unsigned long reqd_free_pages(void) | ||
67 | { | ||
68 | return low_free_pages() / 2; | ||
69 | } | ||
70 | |||
71 | struct swap_map_page { | 52 | struct swap_map_page { |
72 | sector_t entries[MAP_PAGE_ENTRIES]; | 53 | sector_t entries[MAP_PAGE_ENTRIES]; |
73 | sector_t next_swap; | 54 | sector_t next_swap; |
74 | }; | 55 | }; |
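Each swap_map_page stores MAP_PAGE_ENTRIES data sectors plus the sector of the next map page, so the whole image index is a singly linked list of pages on swap. A hedged sketch of walking that chain with the hib_bio_read_page() helper used throughout this file:

/* Illustrative walk of the on-swap map chain: read each map page
 * synchronously, visit its data sectors, then follow next_swap
 * until it is zero (end of image). */
static int walk_swap_map_sketch(sector_t first, struct swap_map_page *buf,
				int (*visit)(sector_t))
{
	sector_t offset = first;
	unsigned int i;
	int error;

	while (offset) {
		error = hib_bio_read_page(offset, buf, NULL);
		if (error)
			return error;
		for (i = 0; i < MAP_PAGE_ENTRIES && buf->entries[i]; i++) {
			error = visit(buf->entries[i]);
			if (error)
				return error;
		}
		offset = buf->next_swap;
	}
	return 0;
}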
75 | 56 | ||
76 | struct swap_map_page_list { | ||
77 | struct swap_map_page *map; | ||
78 | struct swap_map_page_list *next; | ||
79 | }; | ||
80 | |||
81 | /** | 57 | /** |
82 | * The swap_map_handle structure is used for handling swap in | 58 | * The swap_map_handle structure is used for handling swap in |
83 | * a file-like way | 59 | * a file-like way |
@@ -85,18 +61,13 @@ struct swap_map_page_list { | |||
85 | 61 | ||
86 | struct swap_map_handle { | 62 | struct swap_map_handle { |
87 | struct swap_map_page *cur; | 63 | struct swap_map_page *cur; |
88 | struct swap_map_page_list *maps; | ||
89 | sector_t cur_swap; | 64 | sector_t cur_swap; |
90 | sector_t first_sector; | 65 | sector_t first_sector; |
91 | unsigned int k; | 66 | unsigned int k; |
92 | unsigned long reqd_free_pages; | ||
93 | u32 crc32; | ||
94 | }; | 67 | }; |
95 | 68 | ||
96 | struct swsusp_header { | 69 | struct swsusp_header { |
97 | char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) - | 70 | char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int)]; |
98 | sizeof(u32)]; | ||
99 | u32 crc32; | ||
100 | sector_t image; | 71 | sector_t image; |
101 | unsigned int flags; /* Flags to pass to the "boot" kernel */ | 72 | unsigned int flags; /* Flags to pass to the "boot" kernel */ |
102 | char orig_sig[10]; | 73 | char orig_sig[10]; |
@@ -126,7 +97,7 @@ static int swsusp_extents_insert(unsigned long swap_offset) | |||
126 | 97 | ||
127 | /* Figure out where to put the new node */ | 98 | /* Figure out where to put the new node */ |
128 | while (*new) { | 99 | while (*new) { |
129 | ext = rb_entry(*new, struct swsusp_extent, node); | 100 | ext = container_of(*new, struct swsusp_extent, node); |
130 | parent = *new; | 101 | parent = *new; |
131 | if (swap_offset < ext->start) { | 102 | if (swap_offset < ext->start) { |
132 | /* Try to merge */ | 103 | /* Try to merge */ |
@@ -228,8 +199,6 @@ static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags) | |||
228 | memcpy(swsusp_header->sig, HIBERNATE_SIG, 10); | 199 | memcpy(swsusp_header->sig, HIBERNATE_SIG, 10); |
229 | swsusp_header->image = handle->first_sector; | 200 | swsusp_header->image = handle->first_sector; |
230 | swsusp_header->flags = flags; | 201 | swsusp_header->flags = flags; |
231 | if (flags & SF_CRC32_MODE) | ||
232 | swsusp_header->crc32 = handle->crc32; | ||
233 | error = hib_bio_write_page(swsusp_resume_block, | 202 | error = hib_bio_write_page(swsusp_resume_block, |
234 | swsusp_header, NULL); | 203 | swsusp_header, NULL); |
235 | } else { | 204 | } else { |
@@ -276,30 +245,18 @@ static int swsusp_swap_check(void) | |||
276 | static int write_page(void *buf, sector_t offset, struct bio **bio_chain) | 245 | static int write_page(void *buf, sector_t offset, struct bio **bio_chain) |
277 | { | 246 | { |
278 | void *src; | 247 | void *src; |
279 | int ret; | ||
280 | 248 | ||
281 | if (!offset) | 249 | if (!offset) |
282 | return -ENOSPC; | 250 | return -ENOSPC; |
283 | 251 | ||
284 | if (bio_chain) { | 252 | if (bio_chain) { |
285 | src = (void *)__get_free_page(__GFP_WAIT | __GFP_NOWARN | | 253 | src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH); |
286 | __GFP_NORETRY); | ||
287 | if (src) { | 254 | if (src) { |
288 | copy_page(src, buf); | 255 | copy_page(src, buf); |
289 | } else { | 256 | } else { |
290 | ret = hib_wait_on_bio_chain(bio_chain); /* Free pages */ | 257 | WARN_ON_ONCE(1); |
291 | if (ret) | 258 | bio_chain = NULL; /* Go synchronous */ |
292 | return ret; | 259 | src = buf; |
293 | src = (void *)__get_free_page(__GFP_WAIT | | ||
294 | __GFP_NOWARN | | ||
295 | __GFP_NORETRY); | ||
296 | if (src) { | ||
297 | copy_page(src, buf); | ||
298 | } else { | ||
299 | WARN_ON_ONCE(1); | ||
300 | bio_chain = NULL; /* Go synchronous */ | ||
301 | src = buf; | ||
302 | } | ||
303 | } | 260 | } |
304 | } else { | 261 | } else { |
305 | src = buf; | 262 | src = buf; |
@@ -336,7 +293,6 @@ static int get_swap_writer(struct swap_map_handle *handle) | |||
336 | goto err_rel; | 293 | goto err_rel; |
337 | } | 294 | } |
338 | handle->k = 0; | 295 | handle->k = 0; |
339 | handle->reqd_free_pages = reqd_free_pages(); | ||
340 | handle->first_sector = handle->cur_swap; | 296 | handle->first_sector = handle->cur_swap; |
341 | return 0; | 297 | return 0; |
342 | err_rel: | 298 | err_rel: |
@@ -360,27 +316,19 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf, | |||
360 | return error; | 316 | return error; |
361 | handle->cur->entries[handle->k++] = offset; | 317 | handle->cur->entries[handle->k++] = offset; |
362 | if (handle->k >= MAP_PAGE_ENTRIES) { | 318 | if (handle->k >= MAP_PAGE_ENTRIES) { |
319 | error = hib_wait_on_bio_chain(bio_chain); | ||
320 | if (error) | ||
321 | goto out; | ||
363 | offset = alloc_swapdev_block(root_swap); | 322 | offset = alloc_swapdev_block(root_swap); |
364 | if (!offset) | 323 | if (!offset) |
365 | return -ENOSPC; | 324 | return -ENOSPC; |
366 | handle->cur->next_swap = offset; | 325 | handle->cur->next_swap = offset; |
367 | error = write_page(handle->cur, handle->cur_swap, bio_chain); | 326 | error = write_page(handle->cur, handle->cur_swap, NULL); |
368 | if (error) | 327 | if (error) |
369 | goto out; | 328 | goto out; |
370 | clear_page(handle->cur); | 329 | clear_page(handle->cur); |
371 | handle->cur_swap = offset; | 330 | handle->cur_swap = offset; |
372 | handle->k = 0; | 331 | handle->k = 0; |
373 | |||
374 | if (bio_chain && low_free_pages() <= handle->reqd_free_pages) { | ||
375 | error = hib_wait_on_bio_chain(bio_chain); | ||
376 | if (error) | ||
377 | goto out; | ||
378 | /* | ||
379 | * Recalculate the number of required free pages, to | ||
380 | * make sure we never take more than half. | ||
381 | */ | ||
382 | handle->reqd_free_pages = reqd_free_pages(); | ||
383 | } | ||
384 | } | 332 | } |
385 | out: | 333 | out: |
386 | return error; | 334 | return error; |
@@ -424,14 +372,6 @@ static int swap_writer_finish(struct swap_map_handle *handle, | |||
424 | LZO_HEADER, PAGE_SIZE) | 372 | LZO_HEADER, PAGE_SIZE) |
425 | #define LZO_CMP_SIZE (LZO_CMP_PAGES * PAGE_SIZE) | 373 | #define LZO_CMP_SIZE (LZO_CMP_PAGES * PAGE_SIZE) |
426 | 374 | ||
427 | /* Maximum number of threads for compression/decompression. */ | ||
428 | #define LZO_THREADS 3 | ||
429 | |||
430 | /* Minimum/maximum number of pages for read buffering. */ | ||
431 | #define LZO_MIN_RD_PAGES 1024 | ||
432 | #define LZO_MAX_RD_PAGES 8192 | ||
433 | |||
434 | |||
435 | /** | 375 | /** |
436 | * save_image - save the suspend image data | 376 | * save_image - save the suspend image data |
437 | */ | 377 | */ |
@@ -448,9 +388,9 @@ static int save_image(struct swap_map_handle *handle, | |||
448 | struct timeval start; | 388 | struct timeval start; |
449 | struct timeval stop; | 389 | struct timeval stop; |
450 | 390 | ||
451 | printk(KERN_INFO "PM: Saving image data pages (%u pages)...\n", | 391 | printk(KERN_INFO "PM: Saving image data pages (%u pages) ... ", |
452 | nr_to_write); | 392 | nr_to_write); |
453 | m = nr_to_write / 10; | 393 | m = nr_to_write / 100; |
454 | if (!m) | 394 | if (!m) |
455 | m = 1; | 395 | m = 1; |
456 | nr_pages = 0; | 396 | nr_pages = 0; |
@@ -464,8 +404,7 @@ static int save_image(struct swap_map_handle *handle, | |||
464 | if (ret) | 404 | if (ret) |
465 | break; | 405 | break; |
466 | if (!(nr_pages % m)) | 406 | if (!(nr_pages % m)) |
467 | printk(KERN_INFO "PM: Image saving progress: %3d%%\n", | 407 | printk(KERN_CONT "\b\b\b\b%3d%%", nr_pages / m); |
468 | nr_pages / m * 10); | ||
469 | nr_pages++; | 408 | nr_pages++; |
470 | } | 409 | } |
471 | err2 = hib_wait_on_bio_chain(&bio); | 410 | err2 = hib_wait_on_bio_chain(&bio); |
@@ -473,97 +412,13 @@ static int save_image(struct swap_map_handle *handle, | |||
473 | if (!ret) | 412 | if (!ret) |
474 | ret = err2; | 413 | ret = err2; |
475 | if (!ret) | 414 | if (!ret) |
476 | printk(KERN_INFO "PM: Image saving done.\n"); | 415 | printk(KERN_CONT "\b\b\b\bdone\n"); |
416 | else | ||
417 | printk(KERN_CONT "\n"); | ||
477 | swsusp_show_speed(&start, &stop, nr_to_write, "Wrote"); | 418 | swsusp_show_speed(&start, &stop, nr_to_write, "Wrote"); |
478 | return ret; | 419 | return ret; |
479 | } | 420 | } |
480 | 421 | ||
481 | /** | ||
482 | * Structure used for CRC32. | ||
483 | */ | ||
484 | struct crc_data { | ||
485 | struct task_struct *thr; /* thread */ | ||
486 | atomic_t ready; /* ready to start flag */ | ||
487 | atomic_t stop; /* ready to stop flag */ | ||
488 | unsigned run_threads; /* nr current threads */ | ||
489 | wait_queue_head_t go; /* start crc update */ | ||
490 | wait_queue_head_t done; /* crc update done */ | ||
491 | u32 *crc32; /* points to handle's crc32 */ | ||
492 | size_t *unc_len[LZO_THREADS]; /* uncompressed lengths */ | ||
493 | unsigned char *unc[LZO_THREADS]; /* uncompressed data */ | ||
494 | }; | ||
495 | |||
496 | /** | ||
497 | * CRC32 update function that runs in its own thread. | ||
498 | */ | ||
499 | static int crc32_threadfn(void *data) | ||
500 | { | ||
501 | struct crc_data *d = data; | ||
502 | unsigned i; | ||
503 | |||
504 | while (1) { | ||
505 | wait_event(d->go, atomic_read(&d->ready) || | ||
506 | kthread_should_stop()); | ||
507 | if (kthread_should_stop()) { | ||
508 | d->thr = NULL; | ||
509 | atomic_set(&d->stop, 1); | ||
510 | wake_up(&d->done); | ||
511 | break; | ||
512 | } | ||
513 | atomic_set(&d->ready, 0); | ||
514 | |||
515 | for (i = 0; i < d->run_threads; i++) | ||
516 | *d->crc32 = crc32_le(*d->crc32, | ||
517 | d->unc[i], *d->unc_len[i]); | ||
518 | atomic_set(&d->stop, 1); | ||
519 | wake_up(&d->done); | ||
520 | } | ||
521 | return 0; | ||
522 | } | ||
523 | /** | ||
524 | * Structure used for LZO data compression. | ||
525 | */ | ||
526 | struct cmp_data { | ||
527 | struct task_struct *thr; /* thread */ | ||
528 | atomic_t ready; /* ready to start flag */ | ||
529 | atomic_t stop; /* ready to stop flag */ | ||
530 | int ret; /* return code */ | ||
531 | wait_queue_head_t go; /* start compression */ | ||
532 | wait_queue_head_t done; /* compression done */ | ||
533 | size_t unc_len; /* uncompressed length */ | ||
534 | size_t cmp_len; /* compressed length */ | ||
535 | unsigned char unc[LZO_UNC_SIZE]; /* uncompressed buffer */ | ||
536 | unsigned char cmp[LZO_CMP_SIZE]; /* compressed buffer */ | ||
537 | unsigned char wrk[LZO1X_1_MEM_COMPRESS]; /* compression workspace */ | ||
538 | }; | ||
539 | |||
540 | /** | ||
541 | * Compression function that runs in its own thread. | ||
542 | */ | ||
543 | static int lzo_compress_threadfn(void *data) | ||
544 | { | ||
545 | struct cmp_data *d = data; | ||
546 | |||
547 | while (1) { | ||
548 | wait_event(d->go, atomic_read(&d->ready) || | ||
549 | kthread_should_stop()); | ||
550 | if (kthread_should_stop()) { | ||
551 | d->thr = NULL; | ||
552 | d->ret = -1; | ||
553 | atomic_set(&d->stop, 1); | ||
554 | wake_up(&d->done); | ||
555 | break; | ||
556 | } | ||
557 | atomic_set(&d->ready, 0); | ||
558 | |||
559 | d->ret = lzo1x_1_compress(d->unc, d->unc_len, | ||
560 | d->cmp + LZO_HEADER, &d->cmp_len, | ||
561 | d->wrk); | ||
562 | atomic_set(&d->stop, 1); | ||
563 | wake_up(&d->done); | ||
564 | } | ||
565 | return 0; | ||
566 | } | ||
567 | 422 | ||
568 | /** | 423 | /** |
569 | * save_image_lzo - Save the suspend image data compressed with LZO. | 424 | * save_image_lzo - Save the suspend image data compressed with LZO. |
@@ -582,179 +437,98 @@ static int save_image_lzo(struct swap_map_handle *handle, | |||
582 | struct bio *bio; | 437 | struct bio *bio; |
583 | struct timeval start; | 438 | struct timeval start; |
584 | struct timeval stop; | 439 | struct timeval stop; |
585 | size_t off; | 440 | size_t off, unc_len, cmp_len; |
586 | unsigned thr, run_threads, nr_threads; | 441 | unsigned char *unc, *cmp, *wrk, *page; |
587 | unsigned char *page = NULL; | ||
588 | struct cmp_data *data = NULL; | ||
589 | struct crc_data *crc = NULL; | ||
590 | |||
591 | /* | ||
592 | * We'll limit the number of threads for compression to limit memory | ||
593 | * footprint. | ||
594 | */ | ||
595 | nr_threads = num_online_cpus() - 1; | ||
596 | nr_threads = clamp_val(nr_threads, 1, LZO_THREADS); | ||
597 | 442 | ||
598 | page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH); | 443 | page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH); |
599 | if (!page) { | 444 | if (!page) { |
600 | printk(KERN_ERR "PM: Failed to allocate LZO page\n"); | 445 | printk(KERN_ERR "PM: Failed to allocate LZO page\n"); |
601 | ret = -ENOMEM; | 446 | return -ENOMEM; |
602 | goto out_clean; | ||
603 | } | ||
604 | |||
605 | data = vmalloc(sizeof(*data) * nr_threads); | ||
606 | if (!data) { | ||
607 | printk(KERN_ERR "PM: Failed to allocate LZO data\n"); | ||
608 | ret = -ENOMEM; | ||
609 | goto out_clean; | ||
610 | } | 447 | } |
611 | for (thr = 0; thr < nr_threads; thr++) | ||
612 | memset(&data[thr], 0, offsetof(struct cmp_data, go)); | ||
613 | 448 | ||
614 | crc = kmalloc(sizeof(*crc), GFP_KERNEL); | 449 | wrk = vmalloc(LZO1X_1_MEM_COMPRESS); |
615 | if (!crc) { | 450 | if (!wrk) { |
616 | printk(KERN_ERR "PM: Failed to allocate crc\n"); | 451 | printk(KERN_ERR "PM: Failed to allocate LZO workspace\n"); |
617 | ret = -ENOMEM; | 452 | free_page((unsigned long)page); |
618 | goto out_clean; | 453 | return -ENOMEM; |
619 | } | ||
620 | memset(crc, 0, offsetof(struct crc_data, go)); | ||
621 | |||
622 | /* | ||
623 | * Start the compression threads. | ||
624 | */ | ||
625 | for (thr = 0; thr < nr_threads; thr++) { | ||
626 | init_waitqueue_head(&data[thr].go); | ||
627 | init_waitqueue_head(&data[thr].done); | ||
628 | |||
629 | data[thr].thr = kthread_run(lzo_compress_threadfn, | ||
630 | &data[thr], | ||
631 | "image_compress/%u", thr); | ||
632 | if (IS_ERR(data[thr].thr)) { | ||
633 | data[thr].thr = NULL; | ||
634 | printk(KERN_ERR | ||
635 | "PM: Cannot start compression threads\n"); | ||
636 | ret = -ENOMEM; | ||
637 | goto out_clean; | ||
638 | } | ||
639 | } | 454 | } |
640 | 455 | ||
641 | /* | 456 | unc = vmalloc(LZO_UNC_SIZE); |
642 | * Start the CRC32 thread. | 457 | if (!unc) { |
643 | */ | 458 | printk(KERN_ERR "PM: Failed to allocate LZO uncompressed\n"); |
644 | init_waitqueue_head(&crc->go); | 459 | vfree(wrk); |
645 | init_waitqueue_head(&crc->done); | 460 | free_page((unsigned long)page); |
646 | 461 | return -ENOMEM; | |
647 | handle->crc32 = 0; | ||
648 | crc->crc32 = &handle->crc32; | ||
649 | for (thr = 0; thr < nr_threads; thr++) { | ||
650 | crc->unc[thr] = data[thr].unc; | ||
651 | crc->unc_len[thr] = &data[thr].unc_len; | ||
652 | } | 462 | } |
653 | 463 | ||
654 | crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32"); | 464 | cmp = vmalloc(LZO_CMP_SIZE); |
655 | if (IS_ERR(crc->thr)) { | 465 | if (!cmp) { |
656 | crc->thr = NULL; | 466 | printk(KERN_ERR "PM: Failed to allocate LZO compressed\n"); |
657 | printk(KERN_ERR "PM: Cannot start CRC32 thread\n"); | 467 | vfree(unc); |
658 | ret = -ENOMEM; | 468 | vfree(wrk); |
659 | goto out_clean; | 469 | free_page((unsigned long)page); |
470 | return -ENOMEM; | ||
660 | } | 471 | } |
661 | 472 | ||
662 | /* | ||
663 | * Adjust the number of required free pages after all allocations have | ||
664 | * been done. We don't want to run out of pages when writing. | ||
665 | */ | ||
666 | handle->reqd_free_pages = reqd_free_pages(); | ||
667 | |||
668 | printk(KERN_INFO | 473 | printk(KERN_INFO |
669 | "PM: Using %u thread(s) for compression.\n" | 474 | "PM: Compressing and saving image data (%u pages) ... ", |
670 | "PM: Compressing and saving image data (%u pages)...\n", | 475 | nr_to_write); |
671 | nr_threads, nr_to_write); | 476 | m = nr_to_write / 100; |
672 | m = nr_to_write / 10; | ||
673 | if (!m) | 477 | if (!m) |
674 | m = 1; | 478 | m = 1; |
675 | nr_pages = 0; | 479 | nr_pages = 0; |
676 | bio = NULL; | 480 | bio = NULL; |
677 | do_gettimeofday(&start); | 481 | do_gettimeofday(&start); |
678 | for (;;) { | 482 | for (;;) { |
679 | for (thr = 0; thr < nr_threads; thr++) { | 483 | for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) { |
680 | for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) { | 484 | ret = snapshot_read_next(snapshot); |
681 | ret = snapshot_read_next(snapshot); | 485 | if (ret < 0) |
682 | if (ret < 0) | 486 | goto out_finish; |
683 | goto out_finish; | 487 | |
684 | 488 | if (!ret) | |
685 | if (!ret) | ||
686 | break; | ||
687 | |||
688 | memcpy(data[thr].unc + off, | ||
689 | data_of(*snapshot), PAGE_SIZE); | ||
690 | |||
691 | if (!(nr_pages % m)) | ||
692 | printk(KERN_INFO | ||
693 | "PM: Image saving progress: " | ||
694 | "%3d%%\n", | ||
695 | nr_pages / m * 10); | ||
696 | nr_pages++; | ||
697 | } | ||
698 | if (!off) | ||
699 | break; | 489 | break; |
700 | 490 | ||
701 | data[thr].unc_len = off; | 491 | memcpy(unc + off, data_of(*snapshot), PAGE_SIZE); |
702 | 492 | ||
703 | atomic_set(&data[thr].ready, 1); | 493 | if (!(nr_pages % m)) |
704 | wake_up(&data[thr].go); | 494 | printk(KERN_CONT "\b\b\b\b%3d%%", nr_pages / m); |
495 | nr_pages++; | ||
705 | } | 496 | } |
706 | 497 | ||
707 | if (!thr) | 498 | if (!off) |
708 | break; | 499 | break; |
709 | 500 | ||
710 | crc->run_threads = thr; | 501 | unc_len = off; |
711 | atomic_set(&crc->ready, 1); | 502 | ret = lzo1x_1_compress(unc, unc_len, |
712 | wake_up(&crc->go); | 503 | cmp + LZO_HEADER, &cmp_len, wrk); |
504 | if (ret < 0) { | ||
505 | printk(KERN_ERR "PM: LZO compression failed\n"); | ||
506 | break; | ||
507 | } | ||
713 | 508 | ||
714 | for (run_threads = thr, thr = 0; thr < run_threads; thr++) { | 509 | if (unlikely(!cmp_len || |
715 | wait_event(data[thr].done, | 510 | cmp_len > lzo1x_worst_compress(unc_len))) { |
716 | atomic_read(&data[thr].stop)); | 511 | printk(KERN_ERR "PM: Invalid LZO compressed length\n"); |
717 | atomic_set(&data[thr].stop, 0); | 512 | ret = -1; |
513 | break; | ||
514 | } | ||
718 | 515 | ||
719 | ret = data[thr].ret; | 516 | *(size_t *)cmp = cmp_len; |
720 | 517 | ||
721 | if (ret < 0) { | 518 | /* |
722 | printk(KERN_ERR "PM: LZO compression failed\n"); | 519 | * Given we are writing one page at a time to disk, we copy |
723 | goto out_finish; | 520 | * that much from the buffer, although the last bit will likely |
724 | } | 521 | * be smaller than full page. This is OK - we saved the length |
522 | * of the compressed data, so any garbage at the end will be | ||
523 | * discarded when we read it. | ||
524 | */ | ||
525 | for (off = 0; off < LZO_HEADER + cmp_len; off += PAGE_SIZE) { | ||
526 | memcpy(page, cmp + off, PAGE_SIZE); | ||
725 | 527 | ||
726 | if (unlikely(!data[thr].cmp_len || | 528 | ret = swap_write_page(handle, page, &bio); |
727 | data[thr].cmp_len > | 529 | if (ret) |
728 | lzo1x_worst_compress(data[thr].unc_len))) { | ||
729 | printk(KERN_ERR | ||
730 | "PM: Invalid LZO compressed length\n"); | ||
731 | ret = -1; | ||
732 | goto out_finish; | 530 | goto out_finish; |
733 | } | ||
734 | |||
735 | *(size_t *)data[thr].cmp = data[thr].cmp_len; | ||
736 | |||
737 | /* | ||
738 | * Given we are writing one page at a time to disk, we | ||
739 | * copy that much from the buffer, although the last | ||
740 | * bit will likely be smaller than full page. This is | ||
741 | * OK - we saved the length of the compressed data, so | ||
742 | * any garbage at the end will be discarded when we | ||
743 | * read it. | ||
744 | */ | ||
745 | for (off = 0; | ||
746 | off < LZO_HEADER + data[thr].cmp_len; | ||
747 | off += PAGE_SIZE) { | ||
748 | memcpy(page, data[thr].cmp + off, PAGE_SIZE); | ||
749 | |||
750 | ret = swap_write_page(handle, page, &bio); | ||
751 | if (ret) | ||
752 | goto out_finish; | ||
753 | } | ||
754 | } | 531 | } |
755 | |||
756 | wait_event(crc->done, atomic_read(&crc->stop)); | ||
757 | atomic_set(&crc->stop, 0); | ||
758 | } | 532 | } |
759 | 533 | ||
760 | out_finish: | 534 | out_finish: |
@@ -763,21 +537,15 @@ out_finish: | |||
763 | if (!ret) | 537 | if (!ret) |
764 | ret = err2; | 538 | ret = err2; |
765 | if (!ret) | 539 | if (!ret) |
766 | printk(KERN_INFO "PM: Image saving done.\n"); | 540 | printk(KERN_CONT "\b\b\b\bdone\n"); |
541 | else | ||
542 | printk(KERN_CONT "\n"); | ||
767 | swsusp_show_speed(&start, &stop, nr_to_write, "Wrote"); | 543 | swsusp_show_speed(&start, &stop, nr_to_write, "Wrote"); |
768 | out_clean: | 544 | |
769 | if (crc) { | 545 | vfree(cmp); |
770 | if (crc->thr) | 546 | vfree(unc); |
771 | kthread_stop(crc->thr); | 547 | vfree(wrk); |
772 | kfree(crc); | 548 | free_page((unsigned long)page); |
773 | } | ||
774 | if (data) { | ||
775 | for (thr = 0; thr < nr_threads; thr++) | ||
776 | if (data[thr].thr) | ||
777 | kthread_stop(data[thr].thr); | ||
778 | vfree(data); | ||
779 | } | ||
780 | if (page) free_page((unsigned long)page); | ||
781 | 549 | ||
782 | return ret; | 550 | return ret; |
783 | } | 551 | } |
@@ -796,7 +564,8 @@ static int enough_swap(unsigned int nr_pages, unsigned int flags) | |||
796 | 564 | ||
797 | pr_debug("PM: Free swap pages: %u\n", free_swap); | 565 | pr_debug("PM: Free swap pages: %u\n", free_swap); |
798 | 566 | ||
799 | required = PAGES_FOR_IO + nr_pages; | 567 | required = PAGES_FOR_IO + ((flags & SF_NOCOMPRESS_MODE) ? |
568 | nr_pages : (nr_pages * LZO_CMP_PAGES) / LZO_UNC_PAGES + 1); | ||
800 | return free_swap > required; | 569 | return free_swap > required; |
801 | } | 570 | } |
802 | 571 | ||
@@ -824,12 +593,10 @@ int swsusp_write(unsigned int flags) | |||
824 | printk(KERN_ERR "PM: Cannot get swap writer\n"); | 593 | printk(KERN_ERR "PM: Cannot get swap writer\n"); |
825 | return error; | 594 | return error; |
826 | } | 595 | } |
827 | if (flags & SF_NOCOMPRESS_MODE) { | 596 | if (!enough_swap(pages, flags)) { |
828 | if (!enough_swap(pages, flags)) { | 597 | printk(KERN_ERR "PM: Not enough free swap\n"); |
829 | printk(KERN_ERR "PM: Not enough free swap\n"); | 598 | error = -ENOSPC; |
830 | error = -ENOSPC; | 599 | goto out_finish; |
831 | goto out_finish; | ||
832 | } | ||
833 | } | 600 | } |
834 | memset(&snapshot, 0, sizeof(struct snapshot_handle)); | 601 | memset(&snapshot, 0, sizeof(struct snapshot_handle)); |
835 | error = snapshot_read_next(&snapshot); | 602 | error = snapshot_read_next(&snapshot); |
@@ -858,15 +625,8 @@ out_finish: | |||
858 | 625 | ||
859 | static void release_swap_reader(struct swap_map_handle *handle) | 626 | static void release_swap_reader(struct swap_map_handle *handle) |
860 | { | 627 | { |
861 | struct swap_map_page_list *tmp; | 628 | if (handle->cur) |
862 | 629 | free_page((unsigned long)handle->cur); | |
863 | while (handle->maps) { | ||
864 | if (handle->maps->map) | ||
865 | free_page((unsigned long)handle->maps->map); | ||
866 | tmp = handle->maps; | ||
867 | handle->maps = handle->maps->next; | ||
868 | kfree(tmp); | ||
869 | } | ||
870 | handle->cur = NULL; | 630 | handle->cur = NULL; |
871 | } | 631 | } |
872 | 632 | ||
@@ -874,46 +634,22 @@ static int get_swap_reader(struct swap_map_handle *handle, | |||
874 | unsigned int *flags_p) | 634 | unsigned int *flags_p) |
875 | { | 635 | { |
876 | int error; | 636 | int error; |
877 | struct swap_map_page_list *tmp, *last; | ||
878 | sector_t offset; | ||
879 | 637 | ||
880 | *flags_p = swsusp_header->flags; | 638 | *flags_p = swsusp_header->flags; |
881 | 639 | ||
882 | if (!swsusp_header->image) /* how can this happen? */ | 640 | if (!swsusp_header->image) /* how can this happen? */ |
883 | return -EINVAL; | 641 | return -EINVAL; |
884 | 642 | ||
885 | handle->cur = NULL; | 643 | handle->cur = (struct swap_map_page *)get_zeroed_page(__GFP_WAIT | __GFP_HIGH); |
886 | last = handle->maps = NULL; | 644 | if (!handle->cur) |
887 | offset = swsusp_header->image; | 645 | return -ENOMEM; |
888 | while (offset) { | ||
889 | tmp = kmalloc(sizeof(*handle->maps), GFP_KERNEL); | ||
890 | if (!tmp) { | ||
891 | release_swap_reader(handle); | ||
892 | return -ENOMEM; | ||
893 | } | ||
894 | memset(tmp, 0, sizeof(*tmp)); | ||
895 | if (!handle->maps) | ||
896 | handle->maps = tmp; | ||
897 | if (last) | ||
898 | last->next = tmp; | ||
899 | last = tmp; | ||
900 | |||
901 | tmp->map = (struct swap_map_page *) | ||
902 | __get_free_page(__GFP_WAIT | __GFP_HIGH); | ||
903 | if (!tmp->map) { | ||
904 | release_swap_reader(handle); | ||
905 | return -ENOMEM; | ||
906 | } | ||
907 | 646 | ||
908 | error = hib_bio_read_page(offset, tmp->map, NULL); | 647 | error = hib_bio_read_page(swsusp_header->image, handle->cur, NULL); |
909 | if (error) { | 648 | if (error) { |
910 | release_swap_reader(handle); | 649 | release_swap_reader(handle); |
911 | return error; | 650 | return error; |
912 | } | ||
913 | offset = tmp->map->next_swap; | ||
914 | } | 651 | } |
915 | handle->k = 0; | 652 | handle->k = 0; |
916 | handle->cur = handle->maps->map; | ||
917 | return 0; | 653 | return 0; |
918 | } | 654 | } |
919 | 655 | ||
@@ -922,7 +658,6 @@ static int swap_read_page(struct swap_map_handle *handle, void *buf, | |||
922 | { | 658 | { |
923 | sector_t offset; | 659 | sector_t offset; |
924 | int error; | 660 | int error; |
925 | struct swap_map_page_list *tmp; | ||
926 | 661 | ||
927 | if (!handle->cur) | 662 | if (!handle->cur) |
928 | return -EINVAL; | 663 | return -EINVAL; |
@@ -933,15 +668,13 @@ static int swap_read_page(struct swap_map_handle *handle, void *buf, | |||
933 | if (error) | 668 | if (error) |
934 | return error; | 669 | return error; |
935 | if (++handle->k >= MAP_PAGE_ENTRIES) { | 670 | if (++handle->k >= MAP_PAGE_ENTRIES) { |
671 | error = hib_wait_on_bio_chain(bio_chain); | ||
936 | handle->k = 0; | 672 | handle->k = 0; |
937 | free_page((unsigned long)handle->maps->map); | 673 | offset = handle->cur->next_swap; |
938 | tmp = handle->maps; | 674 | if (!offset) |
939 | handle->maps = handle->maps->next; | ||
940 | kfree(tmp); | ||
941 | if (!handle->maps) | ||
942 | release_swap_reader(handle); | 675 | release_swap_reader(handle); |
943 | else | 676 | else if (!error) |
944 | handle->cur = handle->maps->map; | 677 | error = hib_bio_read_page(offset, handle->cur, NULL); |
945 | } | 678 | } |
946 | return error; | 679 | return error; |
947 | } | 680 | } |
@@ -964,93 +697,49 @@ static int load_image(struct swap_map_handle *handle, | |||
964 | unsigned int nr_to_read) | 697 | unsigned int nr_to_read) |
965 | { | 698 | { |
966 | unsigned int m; | 699 | unsigned int m; |
967 | int ret = 0; | 700 | int error = 0; |
968 | struct timeval start; | 701 | struct timeval start; |
969 | struct timeval stop; | 702 | struct timeval stop; |
970 | struct bio *bio; | 703 | struct bio *bio; |
971 | int err2; | 704 | int err2; |
972 | unsigned nr_pages; | 705 | unsigned nr_pages; |
973 | 706 | ||
974 | printk(KERN_INFO "PM: Loading image data pages (%u pages)...\n", | 707 | printk(KERN_INFO "PM: Loading image data pages (%u pages) ... ", |
975 | nr_to_read); | 708 | nr_to_read); |
976 | m = nr_to_read / 10; | 709 | m = nr_to_read / 100; |
977 | if (!m) | 710 | if (!m) |
978 | m = 1; | 711 | m = 1; |
979 | nr_pages = 0; | 712 | nr_pages = 0; |
980 | bio = NULL; | 713 | bio = NULL; |
981 | do_gettimeofday(&start); | 714 | do_gettimeofday(&start); |
982 | for ( ; ; ) { | 715 | for ( ; ; ) { |
983 | ret = snapshot_write_next(snapshot); | 716 | error = snapshot_write_next(snapshot); |
984 | if (ret <= 0) | 717 | if (error <= 0) |
985 | break; | 718 | break; |
986 | ret = swap_read_page(handle, data_of(*snapshot), &bio); | 719 | error = swap_read_page(handle, data_of(*snapshot), &bio); |
987 | if (ret) | 720 | if (error) |
988 | break; | 721 | break; |
989 | if (snapshot->sync_read) | 722 | if (snapshot->sync_read) |
990 | ret = hib_wait_on_bio_chain(&bio); | 723 | error = hib_wait_on_bio_chain(&bio); |
991 | if (ret) | 724 | if (error) |
992 | break; | 725 | break; |
993 | if (!(nr_pages % m)) | 726 | if (!(nr_pages % m)) |
994 | printk(KERN_INFO "PM: Image loading progress: %3d%%\n", | 727 | printk("\b\b\b\b%3d%%", nr_pages / m); |
995 | nr_pages / m * 10); | ||
996 | nr_pages++; | 728 | nr_pages++; |
997 | } | 729 | } |
998 | err2 = hib_wait_on_bio_chain(&bio); | 730 | err2 = hib_wait_on_bio_chain(&bio); |
999 | do_gettimeofday(&stop); | 731 | do_gettimeofday(&stop); |
1000 | if (!ret) | 732 | if (!error) |
1001 | ret = err2; | 733 | error = err2; |
1002 | if (!ret) { | 734 | if (!error) { |
1003 | printk(KERN_INFO "PM: Image loading done.\n"); | 735 | printk("\b\b\b\bdone\n"); |
1004 | snapshot_write_finalize(snapshot); | 736 | snapshot_write_finalize(snapshot); |
1005 | if (!snapshot_image_loaded(snapshot)) | 737 | if (!snapshot_image_loaded(snapshot)) |
1006 | ret = -ENODATA; | 738 | error = -ENODATA; |
1007 | } | 739 | } else |
740 | printk("\n"); | ||
1008 | swsusp_show_speed(&start, &stop, nr_to_read, "Read"); | 741 | swsusp_show_speed(&start, &stop, nr_to_read, "Read"); |
1009 | return ret; | 742 | return error; |
1010 | } | ||
1011 | |||
1012 | /** | ||
1013 | * Structure used for LZO data decompression. | ||
1014 | */ | ||
1015 | struct dec_data { | ||
1016 | struct task_struct *thr; /* thread */ | ||
1017 | atomic_t ready; /* ready to start flag */ | ||
1018 | atomic_t stop; /* ready to stop flag */ | ||
1019 | int ret; /* return code */ | ||
1020 | wait_queue_head_t go; /* start decompression */ | ||
1021 | wait_queue_head_t done; /* decompression done */ | ||
1022 | size_t unc_len; /* uncompressed length */ | ||
1023 | size_t cmp_len; /* compressed length */ | ||
1024 | unsigned char unc[LZO_UNC_SIZE]; /* uncompressed buffer */ | ||
1025 | unsigned char cmp[LZO_CMP_SIZE]; /* compressed buffer */ | ||
1026 | }; | ||
1027 | |||
1028 | /** | ||
1029 | * Decompression function that runs in its own thread. | ||
1030 | */ | ||
1031 | static int lzo_decompress_threadfn(void *data) | ||
1032 | { | ||
1033 | struct dec_data *d = data; | ||
1034 | |||
1035 | while (1) { | ||
1036 | wait_event(d->go, atomic_read(&d->ready) || | ||
1037 | kthread_should_stop()); | ||
1038 | if (kthread_should_stop()) { | ||
1039 | d->thr = NULL; | ||
1040 | d->ret = -1; | ||
1041 | atomic_set(&d->stop, 1); | ||
1042 | wake_up(&d->done); | ||
1043 | break; | ||
1044 | } | ||
1045 | atomic_set(&d->ready, 0); | ||
1046 | |||
1047 | d->unc_len = LZO_UNC_SIZE; | ||
1048 | d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len, | ||
1049 | d->unc, &d->unc_len); | ||
1050 | atomic_set(&d->stop, 1); | ||
1051 | wake_up(&d->done); | ||
1052 | } | ||
1053 | return 0; | ||
1054 | } | 743 | } |
1055 | 744 | ||
1056 | /** | 745 | /** |
@@ -1064,319 +753,136 @@ static int load_image_lzo(struct swap_map_handle *handle, | |||
1064 | unsigned int nr_to_read) | 753 | unsigned int nr_to_read) |
1065 | { | 754 | { |
1066 | unsigned int m; | 755 | unsigned int m; |
1067 | int ret = 0; | 756 | int error = 0; |
1068 | int eof = 0; | ||
1069 | struct bio *bio; | 757 | struct bio *bio; |
1070 | struct timeval start; | 758 | struct timeval start; |
1071 | struct timeval stop; | 759 | struct timeval stop; |
1072 | unsigned nr_pages; | 760 | unsigned nr_pages; |
1073 | size_t off; | 761 | size_t i, off, unc_len, cmp_len; |
1074 | unsigned i, thr, run_threads, nr_threads; | 762 | unsigned char *unc, *cmp, *page[LZO_CMP_PAGES]; |
1075 | unsigned ring = 0, pg = 0, ring_size = 0, | ||
1076 | have = 0, want, need, asked = 0; | ||
1077 | unsigned long read_pages = 0; | ||
1078 | unsigned char **page = NULL; | ||
1079 | struct dec_data *data = NULL; | ||
1080 | struct crc_data *crc = NULL; | ||
1081 | |||
1082 | /* | ||
1083 | * We'll limit the number of threads for decompression to limit memory | ||
1084 | * footprint. | ||
1085 | */ | ||
1086 | nr_threads = num_online_cpus() - 1; | ||
1087 | nr_threads = clamp_val(nr_threads, 1, LZO_THREADS); | ||
1088 | |||
1089 | page = vmalloc(sizeof(*page) * LZO_MAX_RD_PAGES); | ||
1090 | if (!page) { | ||
1091 | printk(KERN_ERR "PM: Failed to allocate LZO page\n"); | ||
1092 | ret = -ENOMEM; | ||
1093 | goto out_clean; | ||
1094 | } | ||
1095 | 763 | ||
1096 | data = vmalloc(sizeof(*data) * nr_threads); | 764 | for (i = 0; i < LZO_CMP_PAGES; i++) { |
1097 | if (!data) { | 765 | page[i] = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH); |
1098 | printk(KERN_ERR "PM: Failed to allocate LZO data\n"); | 766 | if (!page[i]) { |
1099 | ret = -ENOMEM; | 767 | printk(KERN_ERR "PM: Failed to allocate LZO page\n"); |
1100 | goto out_clean; | ||
1101 | } | ||
1102 | for (thr = 0; thr < nr_threads; thr++) | ||
1103 | memset(&data[thr], 0, offsetof(struct dec_data, go)); | ||
1104 | 768 | ||
1105 | crc = kmalloc(sizeof(*crc), GFP_KERNEL); | 769 | while (i) |
1106 | if (!crc) { | 770 | free_page((unsigned long)page[--i]); |
1107 | printk(KERN_ERR "PM: Failed to allocate crc\n"); | 771 | |
1108 | ret = -ENOMEM; | 772 | return -ENOMEM; |
1109 | goto out_clean; | ||
1110 | } | ||
1111 | memset(crc, 0, offsetof(struct crc_data, go)); | ||
1112 | |||
1113 | /* | ||
1114 | * Start the decompression threads. | ||
1115 | */ | ||
1116 | for (thr = 0; thr < nr_threads; thr++) { | ||
1117 | init_waitqueue_head(&data[thr].go); | ||
1118 | init_waitqueue_head(&data[thr].done); | ||
1119 | |||
1120 | data[thr].thr = kthread_run(lzo_decompress_threadfn, | ||
1121 | &data[thr], | ||
1122 | "image_decompress/%u", thr); | ||
1123 | if (IS_ERR(data[thr].thr)) { | ||
1124 | data[thr].thr = NULL; | ||
1125 | printk(KERN_ERR | ||
1126 | "PM: Cannot start decompression threads\n"); | ||
1127 | ret = -ENOMEM; | ||
1128 | goto out_clean; | ||
1129 | } | 773 | } |
1130 | } | 774 | } |
1131 | 775 | ||
1132 | /* | 776 | unc = vmalloc(LZO_UNC_SIZE); |
1133 | * Start the CRC32 thread. | 777 | if (!unc) { |
1134 | */ | 778 | printk(KERN_ERR "PM: Failed to allocate LZO uncompressed\n"); |
1135 | init_waitqueue_head(&crc->go); | ||
1136 | init_waitqueue_head(&crc->done); | ||
1137 | |||
1138 | handle->crc32 = 0; | ||
1139 | crc->crc32 = &handle->crc32; | ||
1140 | for (thr = 0; thr < nr_threads; thr++) { | ||
1141 | crc->unc[thr] = data[thr].unc; | ||
1142 | crc->unc_len[thr] = &data[thr].unc_len; | ||
1143 | } | ||
1144 | 779 | ||
1145 | crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32"); | 780 | for (i = 0; i < LZO_CMP_PAGES; i++) |
1146 | if (IS_ERR(crc->thr)) { | 781 | free_page((unsigned long)page[i]); |
1147 | crc->thr = NULL; | 782 | |
1148 | printk(KERN_ERR "PM: Cannot start CRC32 thread\n"); | 783 | return -ENOMEM; |
1149 | ret = -ENOMEM; | ||
1150 | goto out_clean; | ||
1151 | } | 784 | } |
1152 | 785 | ||
1153 | /* | 786 | cmp = vmalloc(LZO_CMP_SIZE); |
1154 | * Set the number of pages for read buffering. | 787 | if (!cmp) { |
1155 | * This is complete guesswork, because we'll only know the real | 788 | printk(KERN_ERR "PM: Failed to allocate LZO compressed\n"); |
1156 | * picture once prepare_image() is called, which is much later on | ||
1157 | * during the image load phase. We'll assume the worst case and | ||
1158 | * say that none of the image pages are from high memory. | ||
1159 | */ | ||
1160 | if (low_free_pages() > snapshot_get_image_size()) | ||
1161 | read_pages = (low_free_pages() - snapshot_get_image_size()) / 2; | ||
1162 | read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES); | ||
1163 | |||
1164 | for (i = 0; i < read_pages; i++) { | ||
1165 | page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ? | ||
1166 | __GFP_WAIT | __GFP_HIGH : | ||
1167 | __GFP_WAIT | __GFP_NOWARN | | ||
1168 | __GFP_NORETRY); | ||
1169 | 789 | ||
1170 | if (!page[i]) { | 790 | vfree(unc); |
1171 | if (i < LZO_CMP_PAGES) { | 791 | for (i = 0; i < LZO_CMP_PAGES; i++) |
1172 | ring_size = i; | 792 | free_page((unsigned long)page[i]); |
1173 | printk(KERN_ERR | 793 | |
1174 | "PM: Failed to allocate LZO pages\n"); | 794 | return -ENOMEM; |
1175 | ret = -ENOMEM; | ||
1176 | goto out_clean; | ||
1177 | } else { | ||
1178 | break; | ||
1179 | } | ||
1180 | } | ||
1181 | } | 795 | } |
1182 | want = ring_size = i; | ||
1183 | 796 | ||
1184 | printk(KERN_INFO | 797 | printk(KERN_INFO |
1185 | "PM: Using %u thread(s) for decompression.\n" | 798 | "PM: Loading and decompressing image data (%u pages) ... ", |
1186 | "PM: Loading and decompressing image data (%u pages)...\n", | 799 | nr_to_read); |
1187 | nr_threads, nr_to_read); | 800 | m = nr_to_read / 100; |
1188 | m = nr_to_read / 10; | ||
1189 | if (!m) | 801 | if (!m) |
1190 | m = 1; | 802 | m = 1; |
1191 | nr_pages = 0; | 803 | nr_pages = 0; |
1192 | bio = NULL; | 804 | bio = NULL; |
1193 | do_gettimeofday(&start); | 805 | do_gettimeofday(&start); |
1194 | 806 | ||
1195 | ret = snapshot_write_next(snapshot); | 807 | error = snapshot_write_next(snapshot); |
1196 | if (ret <= 0) | 808 | if (error <= 0) |
1197 | goto out_finish; | 809 | goto out_finish; |
1198 | 810 | ||
1199 | for(;;) { | 811 | for (;;) { |
1200 | for (i = 0; !eof && i < want; i++) { | 812 | error = swap_read_page(handle, page[0], NULL); /* sync */ |
1201 | ret = swap_read_page(handle, page[ring], &bio); | 813 | if (error) |
1202 | if (ret) { | 814 | break; |
1203 | /* | ||
1204 | * On real read error, finish. On end of data, | ||
1205 | * set EOF flag and just exit the read loop. | ||
1206 | */ | ||
1207 | if (handle->cur && | ||
1208 | handle->cur->entries[handle->k]) { | ||
1209 | goto out_finish; | ||
1210 | } else { | ||
1211 | eof = 1; | ||
1212 | break; | ||
1213 | } | ||
1214 | } | ||
1215 | if (++ring >= ring_size) | ||
1216 | ring = 0; | ||
1217 | } | ||
1218 | asked += i; | ||
1219 | want -= i; | ||
1220 | |||
1221 | /* | ||
1222 | * We are out of data, wait for some more. | ||
1223 | */ | ||
1224 | if (!have) { | ||
1225 | if (!asked) | ||
1226 | break; | ||
1227 | |||
1228 | ret = hib_wait_on_bio_chain(&bio); | ||
1229 | if (ret) | ||
1230 | goto out_finish; | ||
1231 | have += asked; | ||
1232 | asked = 0; | ||
1233 | if (eof) | ||
1234 | eof = 2; | ||
1235 | } | ||
1236 | 815 | ||
1237 | if (crc->run_threads) { | 816 | cmp_len = *(size_t *)page[0]; |
1238 | wait_event(crc->done, atomic_read(&crc->stop)); | 817 | if (unlikely(!cmp_len || |
1239 | atomic_set(&crc->stop, 0); | 818 | cmp_len > lzo1x_worst_compress(LZO_UNC_SIZE))) { |
1240 | crc->run_threads = 0; | 819 | printk(KERN_ERR "PM: Invalid LZO compressed length\n"); |
820 | error = -1; | ||
821 | break; | ||
1241 | } | 822 | } |
1242 | 823 | ||
1243 | for (thr = 0; have && thr < nr_threads; thr++) { | 824 | for (off = PAGE_SIZE, i = 1; |
1244 | data[thr].cmp_len = *(size_t *)page[pg]; | 825 | off < LZO_HEADER + cmp_len; off += PAGE_SIZE, i++) { |
1245 | if (unlikely(!data[thr].cmp_len || | 826 | error = swap_read_page(handle, page[i], &bio); |
1246 | data[thr].cmp_len > | 827 | if (error) |
1247 | lzo1x_worst_compress(LZO_UNC_SIZE))) { | ||
1248 | printk(KERN_ERR | ||
1249 | "PM: Invalid LZO compressed length\n"); | ||
1250 | ret = -1; | ||
1251 | goto out_finish; | 828 | goto out_finish; |
1252 | } | 829 | } |
1253 | |||
1254 | need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER, | ||
1255 | PAGE_SIZE); | ||
1256 | if (need > have) { | ||
1257 | if (eof > 1) { | ||
1258 | ret = -1; | ||
1259 | goto out_finish; | ||
1260 | } | ||
1261 | break; | ||
1262 | } | ||
1263 | 830 | ||
1264 | for (off = 0; | 831 | error = hib_wait_on_bio_chain(&bio); /* need all data now */ |
1265 | off < LZO_HEADER + data[thr].cmp_len; | 832 | if (error) |
1266 | off += PAGE_SIZE) { | 833 | goto out_finish; |
1267 | memcpy(data[thr].cmp + off, | ||
1268 | page[pg], PAGE_SIZE); | ||
1269 | have--; | ||
1270 | want++; | ||
1271 | if (++pg >= ring_size) | ||
1272 | pg = 0; | ||
1273 | } | ||
1274 | 834 | ||
1275 | atomic_set(&data[thr].ready, 1); | 835 | for (off = 0, i = 0; |
1276 | wake_up(&data[thr].go); | 836 | off < LZO_HEADER + cmp_len; off += PAGE_SIZE, i++) { |
837 | memcpy(cmp + off, page[i], PAGE_SIZE); | ||
1277 | } | 838 | } |
1278 | 839 | ||
1279 | /* | 840 | unc_len = LZO_UNC_SIZE; |
1280 | * Wait for more data while we are decompressing. | 841 | error = lzo1x_decompress_safe(cmp + LZO_HEADER, cmp_len, |
1281 | */ | 842 | unc, &unc_len); |
1282 | if (have < LZO_CMP_PAGES && asked) { | 843 | if (error < 0) { |
1283 | ret = hib_wait_on_bio_chain(&bio); | 844 | printk(KERN_ERR "PM: LZO decompression failed\n"); |
1284 | if (ret) | 845 | break; |
1285 | goto out_finish; | ||
1286 | have += asked; | ||
1287 | asked = 0; | ||
1288 | if (eof) | ||
1289 | eof = 2; | ||
1290 | } | 846 | } |
1291 | 847 | ||
1292 | for (run_threads = thr, thr = 0; thr < run_threads; thr++) { | 848 | if (unlikely(!unc_len || |
1293 | wait_event(data[thr].done, | 849 | unc_len > LZO_UNC_SIZE || |
1294 | atomic_read(&data[thr].stop)); | 850 | unc_len & (PAGE_SIZE - 1))) { |
1295 | atomic_set(&data[thr].stop, 0); | 851 | printk(KERN_ERR "PM: Invalid LZO uncompressed length\n"); |
852 | error = -1; | ||
853 | break; | ||
854 | } | ||
1296 | 855 | ||
1297 | ret = data[thr].ret; | 856 | for (off = 0; off < unc_len; off += PAGE_SIZE) { |
857 | memcpy(data_of(*snapshot), unc + off, PAGE_SIZE); | ||
1298 | 858 | ||
1299 | if (ret < 0) { | 859 | if (!(nr_pages % m)) |
1300 | printk(KERN_ERR | 860 | printk("\b\b\b\b%3d%%", nr_pages / m); |
1301 | "PM: LZO decompression failed\n"); | 861 | nr_pages++; |
1302 | goto out_finish; | ||
1303 | } | ||
1304 | 862 | ||
1305 | if (unlikely(!data[thr].unc_len || | 863 | error = snapshot_write_next(snapshot); |
1306 | data[thr].unc_len > LZO_UNC_SIZE || | 864 | if (error <= 0) |
1307 | data[thr].unc_len & (PAGE_SIZE - 1))) { | ||
1308 | printk(KERN_ERR | ||
1309 | "PM: Invalid LZO uncompressed length\n"); | ||
1310 | ret = -1; | ||
1311 | goto out_finish; | 865 | goto out_finish; |
1312 | } | ||
1313 | |||
1314 | for (off = 0; | ||
1315 | off < data[thr].unc_len; off += PAGE_SIZE) { | ||
1316 | memcpy(data_of(*snapshot), | ||
1317 | data[thr].unc + off, PAGE_SIZE); | ||
1318 | |||
1319 | if (!(nr_pages % m)) | ||
1320 | printk(KERN_INFO | ||
1321 | "PM: Image loading progress: " | ||
1322 | "%3d%%\n", | ||
1323 | nr_pages / m * 10); | ||
1324 | nr_pages++; | ||
1325 | |||
1326 | ret = snapshot_write_next(snapshot); | ||
1327 | if (ret <= 0) { | ||
1328 | crc->run_threads = thr + 1; | ||
1329 | atomic_set(&crc->ready, 1); | ||
1330 | wake_up(&crc->go); | ||
1331 | goto out_finish; | ||
1332 | } | ||
1333 | } | ||
1334 | } | 866 | } |
1335 | |||
1336 | crc->run_threads = thr; | ||
1337 | atomic_set(&crc->ready, 1); | ||
1338 | wake_up(&crc->go); | ||
1339 | } | 867 | } |
1340 | 868 | ||
1341 | out_finish: | 869 | out_finish: |
1342 | if (crc->run_threads) { | ||
1343 | wait_event(crc->done, atomic_read(&crc->stop)); | ||
1344 | atomic_set(&crc->stop, 0); | ||
1345 | } | ||
1346 | do_gettimeofday(&stop); | 870 | do_gettimeofday(&stop); |
1347 | if (!ret) { | 871 | if (!error) { |
1348 | printk(KERN_INFO "PM: Image loading done.\n"); | 872 | printk("\b\b\b\bdone\n"); |
1349 | snapshot_write_finalize(snapshot); | 873 | snapshot_write_finalize(snapshot); |
1350 | if (!snapshot_image_loaded(snapshot)) | 874 | if (!snapshot_image_loaded(snapshot)) |
1351 | ret = -ENODATA; | 875 | error = -ENODATA; |
1352 | if (!ret) { | 876 | } else |
1353 | if (swsusp_header->flags & SF_CRC32_MODE) { | 877 | printk("\n"); |
1354 | if(handle->crc32 != swsusp_header->crc32) { | ||
1355 | printk(KERN_ERR | ||
1356 | "PM: Invalid image CRC32!\n"); | ||
1357 | ret = -ENODATA; | ||
1358 | } | ||
1359 | } | ||
1360 | } | ||
1361 | } | ||
1362 | swsusp_show_speed(&start, &stop, nr_to_read, "Read"); | 878 | swsusp_show_speed(&start, &stop, nr_to_read, "Read"); |
1363 | out_clean: | 879 | |
1364 | for (i = 0; i < ring_size; i++) | 880 | vfree(cmp); |
881 | vfree(unc); | ||
882 | for (i = 0; i < LZO_CMP_PAGES; i++) | ||
1365 | free_page((unsigned long)page[i]); | 883 | free_page((unsigned long)page[i]); |
1366 | if (crc) { | ||
1367 | if (crc->thr) | ||
1368 | kthread_stop(crc->thr); | ||
1369 | kfree(crc); | ||
1370 | } | ||
1371 | if (data) { | ||
1372 | for (thr = 0; thr < nr_threads; thr++) | ||
1373 | if (data[thr].thr) | ||
1374 | kthread_stop(data[thr].thr); | ||
1375 | vfree(data); | ||
1376 | } | ||
1377 | if (page) vfree(page); | ||
1378 | 884 | ||
1379 | return ret; | 885 | return error; |
1380 | } | 886 | } |
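Both generations of load_image_lzo() depend on the framing written at save time: each block begins with a size_t length occupying LZO_HEADER bytes, followed by the compressed payload, padded out to page boundaries. A hedged sketch of validating and decoding one such block from an already-assembled buffer:

/* Illustrative decode of one saved LZO block. The length prefix is
 * sanity-checked against the LZO worst case, and the output must be
 * a whole number of pages, exactly as the loaders above require. */
static int decode_lzo_block_sketch(const unsigned char *cmp,
				   unsigned char *unc)
{
	size_t cmp_len = *(const size_t *)cmp;
	size_t unc_len = LZO_UNC_SIZE;
	int ret;

	if (!cmp_len || cmp_len > lzo1x_worst_compress(LZO_UNC_SIZE))
		return -EINVAL;		/* corrupt length header */

	ret = lzo1x_decompress_safe(cmp + LZO_HEADER, cmp_len,
				    unc, &unc_len);
	if (ret < 0 || !unc_len || unc_len & (PAGE_SIZE - 1))
		return -EIO;		/* bad stream or odd-sized output */

	return 0;
}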
1381 | 887 | ||
1382 | /** | 888 | /** |
@@ -1472,34 +978,6 @@ void swsusp_close(fmode_t mode) | |||
1472 | blkdev_put(hib_resume_bdev, mode); | 978 | blkdev_put(hib_resume_bdev, mode); |
1473 | } | 979 | } |
1474 | 980 | ||
1475 | /** | ||
1476 | * swsusp_unmark - Unmark swsusp signature in the resume device | ||
1477 | */ | ||
1478 | |||
1479 | #ifdef CONFIG_SUSPEND | ||
1480 | int swsusp_unmark(void) | ||
1481 | { | ||
1482 | int error; | ||
1483 | |||
1484 | hib_bio_read_page(swsusp_resume_block, swsusp_header, NULL); | ||
1485 | if (!memcmp(HIBERNATE_SIG,swsusp_header->sig, 10)) { | ||
1486 | memcpy(swsusp_header->sig,swsusp_header->orig_sig, 10); | ||
1487 | error = hib_bio_write_page(swsusp_resume_block, | ||
1488 | swsusp_header, NULL); | ||
1489 | } else { | ||
1490 | printk(KERN_ERR "PM: Cannot find swsusp signature!\n"); | ||
1491 | error = -ENODEV; | ||
1492 | } | ||
1493 | |||
1494 | /* | ||
1495 | * We just returned from suspend, we don't need the image any more. | ||
1496 | */ | ||
1497 | free_all_swap_pages(root_swap); | ||
1498 | |||
1499 | return error; | ||
1500 | } | ||
1501 | #endif | ||
1502 | |||
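The removed swsusp_unmark() (left column) restores the original on-disk signature after resume so the swap device is no longer mistaken for a hibernation image. Its core is a compare-and-restore over the ten signature bytes, sketched here as plain C on an in-memory header (the real code round-trips the page through hib_bio_read_page()/hib_bio_write_page()):

	#include <string.h>
	#include <errno.h>

	struct header_sketch {		/* illustrative layout only */
		char orig_sig[10];	/* signature saved at image write time */
		char sig[10];		/* current signature on the resume device */
	};

	static int unmark(struct header_sketch *hdr, const char *hibernate_sig)
	{
		if (memcmp(hibernate_sig, hdr->sig, 10) != 0)
			return -ENODEV;		/* "Cannot find swsusp signature!" */
		memcpy(hdr->sig, hdr->orig_sig, 10);
		return 0;			/* caller writes the page back */
	}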
1503 | static int swsusp_header_init(void) | 981 | static int swsusp_header_init(void) |
1504 | { | 982 | { |
1505 | swsusp_header = (struct swsusp_header*) __get_free_page(GFP_KERNEL); | 983 | swsusp_header = (struct swsusp_header*) __get_free_page(GFP_KERNEL); |
diff --git a/kernel/power/user.c b/kernel/power/user.c index 4ed81e74f86..42ddbc6f0de 100644 --- a/kernel/power/user.c +++ b/kernel/power/user.c | |||
@@ -20,15 +20,37 @@ | |||
20 | #include <linux/swapops.h> | 20 | #include <linux/swapops.h> |
21 | #include <linux/pm.h> | 21 | #include <linux/pm.h> |
22 | #include <linux/fs.h> | 22 | #include <linux/fs.h> |
23 | #include <linux/compat.h> | ||
24 | #include <linux/console.h> | 23 | #include <linux/console.h> |
25 | #include <linux/cpu.h> | 24 | #include <linux/cpu.h> |
26 | #include <linux/freezer.h> | 25 | #include <linux/freezer.h> |
26 | #include <scsi/scsi_scan.h> | ||
27 | 27 | ||
28 | #include <asm/uaccess.h> | 28 | #include <asm/uaccess.h> |
29 | 29 | ||
30 | #include "power.h" | 30 | #include "power.h" |
31 | 31 | ||
32 | /* | ||
33 | * NOTE: The SNAPSHOT_SET_SWAP_FILE and SNAPSHOT_PMOPS ioctls are obsolete and | ||
34 | * will be removed in the future. They are only preserved here for | ||
35 | * compatibility with existing userland utilities. | ||
36 | */ | ||
37 | #define SNAPSHOT_SET_SWAP_FILE _IOW(SNAPSHOT_IOC_MAGIC, 10, unsigned int) | ||
38 | #define SNAPSHOT_PMOPS _IOW(SNAPSHOT_IOC_MAGIC, 12, unsigned int) | ||
39 | |||
40 | #define PMOPS_PREPARE 1 | ||
41 | #define PMOPS_ENTER 2 | ||
42 | #define PMOPS_FINISH 3 | ||
43 | |||
44 | /* | ||
45 | * NOTE: The following ioctl definitions are wrong and have been replaced with | ||
46 | * correct ones. They are only preserved here for compatibility with existing | ||
47 | * userland utilities and will be removed in the future. | ||
48 | */ | ||
49 | #define SNAPSHOT_ATOMIC_SNAPSHOT _IOW(SNAPSHOT_IOC_MAGIC, 3, void *) | ||
50 | #define SNAPSHOT_SET_IMAGE_SIZE _IOW(SNAPSHOT_IOC_MAGIC, 6, unsigned long) | ||
51 | #define SNAPSHOT_AVAIL_SWAP _IOR(SNAPSHOT_IOC_MAGIC, 7, void *) | ||
52 | #define SNAPSHOT_GET_SWAP_PAGE _IOR(SNAPSHOT_IOC_MAGIC, 8, void *) | ||
53 | |||
32 | 54 | ||
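The deprecated commands above can share one switch with their replacements because an ioctl number packs direction, payload size, magic byte and sequence number into a single 32-bit value, so old and new definitions decode to distinct commands. A small sketch that unpacks one of them (it assumes SNAPSHOT_IOC_MAGIC is '3', as in <linux/suspend_ioctls.h>):

	#include <stdio.h>
	#include <linux/ioctl.h>

	#define SNAPSHOT_IOC_MAGIC	'3'	/* assumed; see suspend_ioctls.h */
	#define SNAPSHOT_SET_IMAGE_SIZE	_IOW(SNAPSHOT_IOC_MAGIC, 6, unsigned long)

	int main(void)
	{
		unsigned int cmd = SNAPSHOT_SET_IMAGE_SIZE;

		printf("cmd=%#.8x dir=%u size=%u nr=%u\n", cmd,
		       _IOC_DIR(cmd), _IOC_SIZE(cmd), _IOC_NR(cmd));
		return 0;
	}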
33 | #define SNAPSHOT_MINOR 231 | 55 | #define SNAPSHOT_MINOR 231 |
34 | 56 | ||
@@ -48,7 +70,7 @@ static int snapshot_open(struct inode *inode, struct file *filp) | |||
48 | struct snapshot_data *data; | 70 | struct snapshot_data *data; |
49 | int error; | 71 | int error; |
50 | 72 | ||
51 | lock_system_sleep(); | 73 | mutex_lock(&pm_mutex); |
52 | 74 | ||
53 | if (!atomic_add_unless(&snapshot_device_available, -1, 0)) { | 75 | if (!atomic_add_unless(&snapshot_device_available, -1, 0)) { |
54 | error = -EBUSY; | 76 | error = -EBUSY; |
@@ -83,6 +105,7 @@ static int snapshot_open(struct inode *inode, struct file *filp) | |||
83 | * appear. | 105 | * appear. |
84 | */ | 106 | */ |
85 | wait_for_device_probe(); | 107 | wait_for_device_probe(); |
108 | scsi_complete_async_scans(); | ||
86 | 109 | ||
87 | data->swap = -1; | 110 | data->swap = -1; |
88 | data->mode = O_WRONLY; | 111 | data->mode = O_WRONLY; |
@@ -99,7 +122,7 @@ static int snapshot_open(struct inode *inode, struct file *filp) | |||
99 | data->platform_support = 0; | 122 | data->platform_support = 0; |
100 | 123 | ||
101 | Unlock: | 124 | Unlock: |
102 | unlock_system_sleep(); | 125 | mutex_unlock(&pm_mutex); |
103 | 126 | ||
104 | return error; | 127 | return error; |
105 | } | 128 | } |
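Both versions of snapshot_open() guard the device the same way: atomic_add_unless(&snapshot_device_available, -1, 0) decrements the count only if it is nonzero, so exactly one opener succeeds and everyone else gets -EBUSY; snapshot_release() increments it back. A C11 sketch of that claim/release pair (illustrative, not the kernel code):

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_int device_available = 1;

	static bool try_claim(void)		/* open path */
	{
		int old = atomic_load(&device_available);

		while (old != 0)		/* decrement unless already zero */
			if (atomic_compare_exchange_weak(&device_available,
							 &old, old - 1))
				return true;	/* we own the device now */
		return false;			/* caller returns -EBUSY */
	}

	static void release(void)		/* release path */
	{
		atomic_fetch_add(&device_available, 1);
	}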
@@ -108,7 +131,7 @@ static int snapshot_release(struct inode *inode, struct file *filp) | |||
108 | { | 131 | { |
109 | struct snapshot_data *data; | 132 | struct snapshot_data *data; |
110 | 133 | ||
111 | lock_system_sleep(); | 134 | mutex_lock(&pm_mutex); |
112 | 135 | ||
113 | swsusp_free(); | 136 | swsusp_free(); |
114 | free_basic_memory_bitmaps(); | 137 | free_basic_memory_bitmaps(); |
@@ -122,7 +145,7 @@ static int snapshot_release(struct inode *inode, struct file *filp) | |||
122 | PM_POST_HIBERNATION : PM_POST_RESTORE); | 145 | PM_POST_HIBERNATION : PM_POST_RESTORE); |
123 | atomic_inc(&snapshot_device_available); | 146 | atomic_inc(&snapshot_device_available); |
124 | 147 | ||
125 | unlock_system_sleep(); | 148 | mutex_unlock(&pm_mutex); |
126 | 149 | ||
127 | return 0; | 150 | return 0; |
128 | } | 151 | } |
@@ -134,7 +157,7 @@ static ssize_t snapshot_read(struct file *filp, char __user *buf, | |||
134 | ssize_t res; | 157 | ssize_t res; |
135 | loff_t pg_offp = *offp & ~PAGE_MASK; | 158 | loff_t pg_offp = *offp & ~PAGE_MASK; |
136 | 159 | ||
137 | lock_system_sleep(); | 160 | mutex_lock(&pm_mutex); |
138 | 161 | ||
139 | data = filp->private_data; | 162 | data = filp->private_data; |
140 | if (!data->ready) { | 163 | if (!data->ready) { |
@@ -155,7 +178,7 @@ static ssize_t snapshot_read(struct file *filp, char __user *buf, | |||
155 | *offp += res; | 178 | *offp += res; |
156 | 179 | ||
157 | Unlock: | 180 | Unlock: |
158 | unlock_system_sleep(); | 181 | mutex_unlock(&pm_mutex); |
159 | 182 | ||
160 | return res; | 183 | return res; |
161 | } | 184 | } |
@@ -167,7 +190,7 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf, | |||
167 | ssize_t res; | 190 | ssize_t res; |
168 | loff_t pg_offp = *offp & ~PAGE_MASK; | 191 | loff_t pg_offp = *offp & ~PAGE_MASK; |
169 | 192 | ||
170 | lock_system_sleep(); | 193 | mutex_lock(&pm_mutex); |
171 | 194 | ||
172 | data = filp->private_data; | 195 | data = filp->private_data; |
173 | 196 | ||
@@ -184,11 +207,20 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf, | |||
184 | if (res > 0) | 207 | if (res > 0) |
185 | *offp += res; | 208 | *offp += res; |
186 | unlock: | 209 | unlock: |
187 | unlock_system_sleep(); | 210 | mutex_unlock(&pm_mutex); |
188 | 211 | ||
189 | return res; | 212 | return res; |
190 | } | 213 | } |
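In both the read and write paths, pg_offp = *offp & ~PAGE_MASK isolates the position within the current page: PAGE_MASK is ~(PAGE_SIZE - 1), so its complement keeps only the low bits, and PAGE_SIZE - pg_offp is the room left in that page. A quick demonstration (4 KiB page assumed):

	#include <stdio.h>

	#define PAGE_SIZE	4096UL
	#define PAGE_MASK	(~(PAGE_SIZE - 1))

	int main(void)
	{
		unsigned long offp = 12345;			/* file position */
		unsigned long pg_offp = offp & ~PAGE_MASK;	/* 12345 % 4096 = 57 */

		printf("in-page offset %lu, room left %lu\n",
		       pg_offp, PAGE_SIZE - pg_offp);
		return 0;
	}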
191 | 214 | ||
215 | static void snapshot_deprecated_ioctl(unsigned int cmd) | ||
216 | { | ||
217 | if (printk_ratelimit()) | ||
218 | printk(KERN_NOTICE "%pf: ioctl '%.8x' is deprecated and will " | ||
219 | "be removed soon, update your suspend-to-disk " | ||
220 | "utilities\n", | ||
221 | __builtin_return_address(0), cmd); | ||
222 | } | ||
223 | |||
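snapshot_deprecated_ioctl() wraps its warning in printk_ratelimit() so a userland tool looping on an obsolete command cannot flood the log; "%pf" resolves __builtin_return_address(0) to the caller's symbol name. A crude userspace analogue of the rate limit (interval and command number chosen arbitrarily for the sketch):

	#include <stdio.h>
	#include <stdbool.h>
	#include <time.h>

	static bool ratelimit(void)		/* at most one message per 5 s */
	{
		static time_t last;
		time_t now = time(NULL);

		if (now - last < 5)
			return false;
		last = now;
		return true;
	}

	static void warn_deprecated(unsigned int cmd)
	{
		if (ratelimit())
			fprintf(stderr, "ioctl %#.8x is deprecated\n", cmd);
	}

	int main(void)
	{
		for (int i = 0; i < 3; i++)
			warn_deprecated(0x12345678);	/* prints only once */
		return 0;
	}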
192 | static long snapshot_ioctl(struct file *filp, unsigned int cmd, | 224 | static long snapshot_ioctl(struct file *filp, unsigned int cmd, |
193 | unsigned long arg) | 225 | unsigned long arg) |
194 | { | 226 | { |
@@ -219,7 +251,15 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd, | |||
219 | sys_sync(); | 251 | sys_sync(); |
220 | printk("done.\n"); | 252 | printk("done.\n"); |
221 | 253 | ||
254 | error = usermodehelper_disable(); | ||
255 | if (error) | ||
256 | break; | ||
257 | |||
222 | error = freeze_processes(); | 258 | error = freeze_processes(); |
259 | if (error) { | ||
260 | thaw_processes(); | ||
261 | usermodehelper_enable(); | ||
262 | } | ||
223 | if (!error) | 263 | if (!error) |
224 | data->frozen = 1; | 264 | data->frozen = 1; |
225 | break; | 265 | break; |
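The new SNAPSHOT_FREEZE body is a small acquire/rollback sequence: user-mode helpers are disabled first, and if freezing processes then fails, both steps are undone in reverse order before the error is returned. The shape, with stub step functions standing in for the kernel calls:

	static int helpers_disable(void)  { return 0; }	/* stand-ins for */
	static void helpers_enable(void)  { }		/* usermodehelper_*() and */
	static int processes_freeze(void) { return 0; }	/* freeze_processes() */
	static void processes_thaw(void)  { }

	static int freeze_for_snapshot(void)
	{
		int error = helpers_disable();

		if (error)
			return error;		/* nothing to undo yet */

		error = processes_freeze();
		if (error) {
			processes_thaw();	/* undo the partial freeze */
			helpers_enable();	/* reverse order of setup */
		}
		return error;
	}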
@@ -229,9 +269,12 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd, | |||
229 | break; | 269 | break; |
230 | pm_restore_gfp_mask(); | 270 | pm_restore_gfp_mask(); |
231 | thaw_processes(); | 271 | thaw_processes(); |
272 | usermodehelper_enable(); | ||
232 | data->frozen = 0; | 273 | data->frozen = 0; |
233 | break; | 274 | break; |
234 | 275 | ||
276 | case SNAPSHOT_ATOMIC_SNAPSHOT: | ||
277 | snapshot_deprecated_ioctl(cmd); | ||
235 | case SNAPSHOT_CREATE_IMAGE: | 278 | case SNAPSHOT_CREATE_IMAGE: |
236 | if (data->mode != O_RDONLY || !data->frozen || data->ready) { | 279 | if (data->mode != O_RDONLY || !data->frozen || data->ready) { |
237 | error = -EPERM; | 280 | error = -EPERM; |
@@ -239,11 +282,10 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd, | |||
239 | } | 282 | } |
240 | pm_restore_gfp_mask(); | 283 | pm_restore_gfp_mask(); |
241 | error = hibernation_snapshot(data->platform_support); | 284 | error = hibernation_snapshot(data->platform_support); |
242 | if (!error) { | 285 | if (!error) |
243 | error = put_user(in_suspend, (int __user *)arg); | 286 | error = put_user(in_suspend, (int __user *)arg); |
244 | data->ready = !freezer_test_done && !error; | 287 | if (!error) |
245 | freezer_test_done = false; | 288 | data->ready = 1; |
246 | } | ||
247 | break; | 289 | break; |
248 | 290 | ||
249 | case SNAPSHOT_ATOMIC_RESTORE: | 291 | case SNAPSHOT_ATOMIC_RESTORE: |
@@ -260,17 +302,10 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd, | |||
260 | swsusp_free(); | 302 | swsusp_free(); |
261 | memset(&data->handle, 0, sizeof(struct snapshot_handle)); | 303 | memset(&data->handle, 0, sizeof(struct snapshot_handle)); |
262 | data->ready = 0; | 304 | data->ready = 0; |
263 | /* | ||
264 | * It is necessary to thaw kernel threads here, because | ||
265 | * SNAPSHOT_CREATE_IMAGE may be invoked directly after | ||
266 | * SNAPSHOT_FREE. In that case, if kernel threads were not | ||
267 | * thawed, the preallocation of memory carried out by | ||
268 | * hibernation_snapshot() might run into problems (i.e. it | ||
269 | * might fail or even deadlock). | ||
270 | */ | ||
271 | thaw_kernel_threads(); | ||
272 | break; | 305 | break; |
273 | 306 | ||
307 | case SNAPSHOT_SET_IMAGE_SIZE: | ||
308 | snapshot_deprecated_ioctl(cmd); | ||
274 | case SNAPSHOT_PREF_IMAGE_SIZE: | 309 | case SNAPSHOT_PREF_IMAGE_SIZE: |
275 | image_size = arg; | 310 | image_size = arg; |
276 | break; | 311 | break; |
@@ -285,12 +320,16 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd, | |||
285 | error = put_user(size, (loff_t __user *)arg); | 320 | error = put_user(size, (loff_t __user *)arg); |
286 | break; | 321 | break; |
287 | 322 | ||
323 | case SNAPSHOT_AVAIL_SWAP: | ||
324 | snapshot_deprecated_ioctl(cmd); | ||
288 | case SNAPSHOT_AVAIL_SWAP_SIZE: | 325 | case SNAPSHOT_AVAIL_SWAP_SIZE: |
289 | size = count_swap_pages(data->swap, 1); | 326 | size = count_swap_pages(data->swap, 1); |
290 | size <<= PAGE_SHIFT; | 327 | size <<= PAGE_SHIFT; |
291 | error = put_user(size, (loff_t __user *)arg); | 328 | error = put_user(size, (loff_t __user *)arg); |
292 | break; | 329 | break; |
293 | 330 | ||
331 | case SNAPSHOT_GET_SWAP_PAGE: | ||
332 | snapshot_deprecated_ioctl(cmd); | ||
294 | case SNAPSHOT_ALLOC_SWAP_PAGE: | 333 | case SNAPSHOT_ALLOC_SWAP_PAGE: |
295 | if (data->swap < 0 || data->swap >= MAX_SWAPFILES) { | 334 | if (data->swap < 0 || data->swap >= MAX_SWAPFILES) { |
296 | error = -ENODEV; | 335 | error = -ENODEV; |
@@ -313,6 +352,27 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd, | |||
313 | free_all_swap_pages(data->swap); | 352 | free_all_swap_pages(data->swap); |
314 | break; | 353 | break; |
315 | 354 | ||
355 | case SNAPSHOT_SET_SWAP_FILE: /* This ioctl is deprecated */ | ||
356 | snapshot_deprecated_ioctl(cmd); | ||
357 | if (!swsusp_swap_in_use()) { | ||
358 | /* | ||
359 | * User space encodes device types as two-byte values, | ||
360 | * so we need to recode them | ||
361 | */ | ||
362 | if (old_decode_dev(arg)) { | ||
363 | data->swap = swap_type_of(old_decode_dev(arg), | ||
364 | 0, NULL); | ||
365 | if (data->swap < 0) | ||
366 | error = -ENODEV; | ||
367 | } else { | ||
368 | data->swap = -1; | ||
369 | error = -EINVAL; | ||
370 | } | ||
371 | } else { | ||
372 | error = -EPERM; | ||
373 | } | ||
374 | break; | ||
375 | |||
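As the comment in the restored SNAPSHOT_SET_SWAP_FILE case says, old userland passes the swap device in the legacy 16-bit dev_t format; the kernel's old_decode_dev() splits it as major = (val >> 8) & 255, minor = val & 255, and a zero value is rejected with -EINVAL. Userspace illustration of the decoding:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint16_t arg = (8 << 8) | 3;	/* e.g. 8:3 is /dev/sda3 */

		printf("major %u, minor %u\n", (arg >> 8) & 255, arg & 255);
		return 0;
	}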
316 | case SNAPSHOT_S2RAM: | 376 | case SNAPSHOT_S2RAM: |
317 | if (!data->frozen) { | 377 | if (!data->frozen) { |
318 | error = -EPERM; | 378 | error = -EPERM; |
@@ -335,6 +395,33 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd, | |||
335 | error = hibernation_platform_enter(); | 395 | error = hibernation_platform_enter(); |
336 | break; | 396 | break; |
337 | 397 | ||
398 | case SNAPSHOT_PMOPS: /* This ioctl is deprecated */ | ||
399 | snapshot_deprecated_ioctl(cmd); | ||
400 | error = -EINVAL; | ||
401 | |||
402 | switch (arg) { | ||
403 | |||
404 | case PMOPS_PREPARE: | ||
405 | data->platform_support = 1; | ||
406 | error = 0; | ||
407 | break; | ||
408 | |||
409 | case PMOPS_ENTER: | ||
410 | if (data->platform_support) | ||
411 | error = hibernation_platform_enter(); | ||
412 | break; | ||
413 | |||
414 | case PMOPS_FINISH: | ||
415 | if (data->platform_support) | ||
416 | error = 0; | ||
417 | break; | ||
418 | |||
419 | default: | ||
420 | printk(KERN_ERR "SNAPSHOT_PMOPS: invalid argument %ld\n", arg); | ||
421 | |||
422 | } | ||
423 | break; | ||
424 | |||
338 | case SNAPSHOT_SET_SWAP_AREA: | 425 | case SNAPSHOT_SET_SWAP_AREA: |
339 | if (swsusp_swap_in_use()) { | 426 | if (swsusp_swap_in_use()) { |
340 | error = -EPERM; | 427 | error = -EPERM; |
@@ -376,66 +463,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd, | |||
376 | return error; | 463 | return error; |
377 | } | 464 | } |
378 | 465 | ||
379 | #ifdef CONFIG_COMPAT | ||
380 | |||
381 | struct compat_resume_swap_area { | ||
382 | compat_loff_t offset; | ||
383 | u32 dev; | ||
384 | } __packed; | ||
385 | |||
386 | static long | ||
387 | snapshot_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | ||
388 | { | ||
389 | BUILD_BUG_ON(sizeof(loff_t) != sizeof(compat_loff_t)); | ||
390 | |||
391 | switch (cmd) { | ||
392 | case SNAPSHOT_GET_IMAGE_SIZE: | ||
393 | case SNAPSHOT_AVAIL_SWAP_SIZE: | ||
394 | case SNAPSHOT_ALLOC_SWAP_PAGE: { | ||
395 | compat_loff_t __user *uoffset = compat_ptr(arg); | ||
396 | loff_t offset; | ||
397 | mm_segment_t old_fs; | ||
398 | int err; | ||
399 | |||
400 | old_fs = get_fs(); | ||
401 | set_fs(KERNEL_DS); | ||
402 | err = snapshot_ioctl(file, cmd, (unsigned long) &offset); | ||
403 | set_fs(old_fs); | ||
404 | if (!err && put_user(offset, uoffset)) | ||
405 | err = -EFAULT; | ||
406 | return err; | ||
407 | } | ||
408 | |||
409 | case SNAPSHOT_CREATE_IMAGE: | ||
410 | return snapshot_ioctl(file, cmd, | ||
411 | (unsigned long) compat_ptr(arg)); | ||
412 | |||
413 | case SNAPSHOT_SET_SWAP_AREA: { | ||
414 | struct compat_resume_swap_area __user *u_swap_area = | ||
415 | compat_ptr(arg); | ||
416 | struct resume_swap_area swap_area; | ||
417 | mm_segment_t old_fs; | ||
418 | int err; | ||
419 | |||
420 | err = get_user(swap_area.offset, &u_swap_area->offset); | ||
421 | err |= get_user(swap_area.dev, &u_swap_area->dev); | ||
422 | if (err) | ||
423 | return -EFAULT; | ||
424 | old_fs = get_fs(); | ||
425 | set_fs(KERNEL_DS); | ||
426 | err = snapshot_ioctl(file, SNAPSHOT_SET_SWAP_AREA, | ||
427 | (unsigned long) &swap_area); | ||
428 | set_fs(old_fs); | ||
429 | return err; | ||
430 | } | ||
431 | |||
432 | default: | ||
433 | return snapshot_ioctl(file, cmd, arg); | ||
434 | } | ||
435 | } | ||
436 | |||
437 | #endif /* CONFIG_COMPAT */ | ||
438 | |||
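The compat handler removed above exists because 32-bit and 64-bit ABIs can lay out the same struct differently: a 64-bit loff_t is 8-byte aligned natively but only 4-byte aligned in the 32-bit ABI, which is why compat_resume_swap_area is declared __packed and translated field by field before the native ioctl runs. An illustration of such a layout split (generic types, not the kernel's exact definitions):

	#include <stdio.h>
	#include <stdint.h>

	struct native_area {		/* 64-bit kernel's view */
		int64_t offset;
		uint32_t dev;
	};				/* tail-padded for int64_t alignment */

	struct compat_area {		/* 32-bit caller's view */
		int64_t offset;
		uint32_t dev;
	} __attribute__((packed));	/* fixed 12-byte layout */

	int main(void)
	{
		printf("native %zu bytes, compat %zu bytes\n",
		       sizeof(struct native_area), sizeof(struct compat_area));
		return 0;	/* typically "native 16 bytes, compat 12 bytes" */
	}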
439 | static const struct file_operations snapshot_fops = { | 466 | static const struct file_operations snapshot_fops = { |
440 | .open = snapshot_open, | 467 | .open = snapshot_open, |
441 | .release = snapshot_release, | 468 | .release = snapshot_release, |
@@ -443,9 +470,6 @@ static const struct file_operations snapshot_fops = { | |||
443 | .write = snapshot_write, | 470 | .write = snapshot_write, |
444 | .llseek = no_llseek, | 471 | .llseek = no_llseek, |
445 | .unlocked_ioctl = snapshot_ioctl, | 472 | .unlocked_ioctl = snapshot_ioctl, |
446 | #ifdef CONFIG_COMPAT | ||
447 | .compat_ioctl = snapshot_compat_ioctl, | ||
448 | #endif | ||
449 | }; | 473 | }; |
450 | 474 | ||
451 | static struct miscdevice snapshot_device = { | 475 | static struct miscdevice snapshot_device = { |
diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c index 8f50de394d2..81e1b7c65ca 100644 --- a/kernel/power/wakelock.c +++ b/kernel/power/wakelock.c | |||
@@ -1,266 +1,634 @@ | |||
1 | /* | 1 | /* kernel/power/wakelock.c |
2 | * kernel/power/wakelock.c | ||
3 | * | 2 | * |
4 | * User space wakeup sources support. | 3 | * Copyright (C) 2005-2008 Google, Inc. |
5 | * | 4 | * |
6 | * Copyright (C) 2012 Rafael J. Wysocki <rjw@sisk.pl> | 5 | * This software is licensed under the terms of the GNU General Public |
6 | * License version 2, as published by the Free Software Foundation, and | ||
7 | * may be copied, distributed, and modified under those terms. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
7 | * | 13 | * |
8 | * This code is based on the analogous interface allowing user space to | ||
9 | * manipulate wakelocks on Android. | ||
10 | */ | 14 | */ |
11 | 15 | ||
12 | #include <linux/capability.h> | 16 | #include <linux/module.h> |
13 | #include <linux/ctype.h> | 17 | #include <linux/platform_device.h> |
14 | #include <linux/device.h> | 18 | #include <linux/rtc.h> |
15 | #include <linux/err.h> | 19 | #include <linux/suspend.h> |
16 | #include <linux/hrtimer.h> | 20 | #include <linux/syscalls.h> /* sys_sync */ |
17 | #include <linux/list.h> | 21 | #include <linux/wakelock.h> |
18 | #include <linux/rbtree.h> | 22 | #ifdef CONFIG_WAKELOCK_STAT |
19 | #include <linux/slab.h> | 23 | #include <linux/proc_fs.h> |
20 | |||
21 | static DEFINE_MUTEX(wakelocks_lock); | ||
22 | |||
23 | struct wakelock { | ||
24 | char *name; | ||
25 | struct rb_node node; | ||
26 | struct wakeup_source ws; | ||
27 | #ifdef CONFIG_PM_WAKELOCKS_GC | ||
28 | struct list_head lru; | ||
29 | #endif | 24 | #endif |
25 | #include "power.h" | ||
26 | |||
27 | enum { | ||
28 | DEBUG_EXIT_SUSPEND = 1U << 0, | ||
29 | DEBUG_WAKEUP = 1U << 1, | ||
30 | DEBUG_SUSPEND = 1U << 2, | ||
31 | DEBUG_EXPIRE = 1U << 3, | ||
32 | DEBUG_WAKE_LOCK = 1U << 4, | ||
30 | }; | 33 | }; |
34 | static int debug_mask = DEBUG_EXIT_SUSPEND | DEBUG_WAKEUP; | ||
35 | module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP); | ||
36 | |||
37 | #define WAKE_LOCK_TYPE_MASK (0x0f) | ||
38 | #define WAKE_LOCK_INITIALIZED (1U << 8) | ||
39 | #define WAKE_LOCK_ACTIVE (1U << 9) | ||
40 | #define WAKE_LOCK_AUTO_EXPIRE (1U << 10) | ||
41 | #define WAKE_LOCK_PREVENTING_SUSPEND (1U << 11) | ||
42 | |||
43 | static DEFINE_SPINLOCK(list_lock); | ||
44 | static LIST_HEAD(inactive_locks); | ||
45 | static struct list_head active_wake_locks[WAKE_LOCK_TYPE_COUNT]; | ||
46 | static int current_event_num; | ||
47 | struct workqueue_struct *suspend_work_queue; | ||
48 | struct wake_lock main_wake_lock; | ||
49 | suspend_state_t requested_suspend_state = PM_SUSPEND_MEM; | ||
50 | static struct wake_lock unknown_wakeup; | ||
51 | static struct wake_lock suspend_backoff_lock; | ||
52 | |||
53 | #define SUSPEND_BACKOFF_THRESHOLD 10 | ||
54 | #define SUSPEND_BACKOFF_INTERVAL 10000 | ||
55 | |||
56 | static unsigned suspend_short_count; | ||
57 | |||
58 | #ifdef CONFIG_WAKELOCK_STAT | ||
59 | static struct wake_lock deleted_wake_locks; | ||
60 | static ktime_t last_sleep_time_update; | ||
61 | static int wait_for_wakeup; | ||
62 | |||
63 | int get_expired_time(struct wake_lock *lock, ktime_t *expire_time) | ||
64 | { | ||
65 | struct timespec ts; | ||
66 | struct timespec kt; | ||
67 | struct timespec tomono; | ||
68 | struct timespec delta; | ||
69 | struct timespec sleep; | ||
70 | long timeout; | ||
71 | |||
72 | if (!(lock->flags & WAKE_LOCK_AUTO_EXPIRE)) | ||
73 | return 0; | ||
74 | get_xtime_and_monotonic_and_sleep_offset(&kt, &tomono, &sleep); | ||
75 | timeout = lock->expires - jiffies; | ||
76 | if (timeout > 0) | ||
77 | return 0; | ||
78 | jiffies_to_timespec(-timeout, &delta); | ||
79 | set_normalized_timespec(&ts, kt.tv_sec + tomono.tv_sec - delta.tv_sec, | ||
80 | kt.tv_nsec + tomono.tv_nsec - delta.tv_nsec); | ||
81 | *expire_time = timespec_to_ktime(ts); | ||
82 | return 1; | ||
83 | } | ||
31 | 84 | ||
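get_expired_time() answers "when did this auto-expiring lock actually run out?": the jiffies by which the deadline was overshot are converted to a timespec and subtracted from the current monotonic time. A userspace analogue using clock_gettime() (the kernel works in jiffies and ktime_t; names here are illustrative):

	#include <stdio.h>
	#include <time.h>

	/* monotonic "now" minus the time elapsed since the deadline passed */
	static struct timespec expired_at(long overshoot_ms)
	{
		struct timespec now, t;

		clock_gettime(CLOCK_MONOTONIC, &now);
		t.tv_sec  = now.tv_sec - overshoot_ms / 1000;
		t.tv_nsec = now.tv_nsec - (overshoot_ms % 1000) * 1000000L;
		if (t.tv_nsec < 0) {	/* same job as set_normalized_timespec() */
			t.tv_nsec += 1000000000L;
			t.tv_sec--;
		}
		return t;
	}

	int main(void)
	{
		struct timespec ts = expired_at(2500);	/* expired 2.5 s ago */

		printf("expired at %ld.%09ld (monotonic)\n",
		       (long)ts.tv_sec, ts.tv_nsec);
		return 0;
	}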
32 | static struct rb_root wakelocks_tree = RB_ROOT; | ||
33 | 85 | ||
34 | ssize_t pm_show_wakelocks(char *buf, bool show_active) | 86 | static int print_lock_stat(struct seq_file *m, struct wake_lock *lock) |
35 | { | 87 | { |
36 | struct rb_node *node; | 88 | int lock_count = lock->stat.count; |
37 | struct wakelock *wl; | 89 | int expire_count = lock->stat.expire_count; |
38 | char *str = buf; | 90 | ktime_t active_time = ktime_set(0, 0); |
39 | char *end = buf + PAGE_SIZE; | 91 | ktime_t total_time = lock->stat.total_time; |
92 | ktime_t max_time = lock->stat.max_time; | ||
93 | |||
94 | ktime_t prevent_suspend_time = lock->stat.prevent_suspend_time; | ||
95 | if (lock->flags & WAKE_LOCK_ACTIVE) { | ||
96 | ktime_t now, add_time; | ||
97 | int expired = get_expired_time(lock, &now); | ||
98 | if (!expired) | ||
99 | now = ktime_get(); | ||
100 | add_time = ktime_sub(now, lock->stat.last_time); | ||
101 | lock_count++; | ||
102 | if (!expired) | ||
103 | active_time = add_time; | ||
104 | else | ||
105 | expire_count++; | ||
106 | total_time = ktime_add(total_time, add_time); | ||
107 | if (lock->flags & WAKE_LOCK_PREVENTING_SUSPEND) | ||
108 | prevent_suspend_time = ktime_add(prevent_suspend_time, | ||
109 | ktime_sub(now, last_sleep_time_update)); | ||
110 | if (add_time.tv64 > max_time.tv64) | ||
111 | max_time = add_time; | ||
112 | } | ||
40 | 113 | ||
41 | mutex_lock(&wakelocks_lock); | 114 | return seq_printf(m, |
115 | "\"%s\"\t%d\t%d\t%d\t%lld\t%lld\t%lld\t%lld\t%lld\n", | ||
116 | lock->name, lock_count, expire_count, | ||
117 | lock->stat.wakeup_count, ktime_to_ns(active_time), | ||
118 | ktime_to_ns(total_time), | ||
119 | ktime_to_ns(prevent_suspend_time), ktime_to_ns(max_time), | ||
120 | ktime_to_ns(lock->stat.last_time)); | ||
121 | } | ||
42 | 122 | ||
43 | for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) { | 123 | static int wakelock_stats_show(struct seq_file *m, void *unused) |
44 | wl = rb_entry(node, struct wakelock, node); | 124 | { |
45 | if (wl->ws.active == show_active) | 125 | unsigned long irqflags; |
46 | str += scnprintf(str, end - str, "%s ", wl->name); | 126 | struct wake_lock *lock; |
127 | int ret; | ||
128 | int type; | ||
129 | |||
130 | spin_lock_irqsave(&list_lock, irqflags); | ||
131 | |||
132 | ret = seq_puts(m, "name\tcount\texpire_count\twake_count\tactive_since" | ||
133 | "\ttotal_time\tsleep_time\tmax_time\tlast_change\n"); | ||
134 | list_for_each_entry(lock, &inactive_locks, link) | ||
135 | ret = print_lock_stat(m, lock); | ||
136 | for (type = 0; type < WAKE_LOCK_TYPE_COUNT; type++) { | ||
137 | list_for_each_entry(lock, &active_wake_locks[type], link) | ||
138 | ret = print_lock_stat(m, lock); | ||
47 | } | 139 | } |
48 | if (str > buf) | 140 | spin_unlock_irqrestore(&list_lock, irqflags); |
49 | str--; | 141 | return 0; |
142 | } | ||
50 | 143 | ||
51 | str += scnprintf(str, end - str, "\n"); | 144 | static void wake_unlock_stat_locked(struct wake_lock *lock, int expired) |
145 | { | ||
146 | ktime_t duration; | ||
147 | ktime_t now; | ||
148 | if (!(lock->flags & WAKE_LOCK_ACTIVE)) | ||
149 | return; | ||
150 | if (get_expired_time(lock, &now)) | ||
151 | expired = 1; | ||
152 | else | ||
153 | now = ktime_get(); | ||
154 | lock->stat.count++; | ||
155 | if (expired) | ||
156 | lock->stat.expire_count++; | ||
157 | duration = ktime_sub(now, lock->stat.last_time); | ||
158 | lock->stat.total_time = ktime_add(lock->stat.total_time, duration); | ||
159 | if (ktime_to_ns(duration) > ktime_to_ns(lock->stat.max_time)) | ||
160 | lock->stat.max_time = duration; | ||
161 | lock->stat.last_time = ktime_get(); | ||
162 | if (lock->flags & WAKE_LOCK_PREVENTING_SUSPEND) { | ||
163 | duration = ktime_sub(now, last_sleep_time_update); | ||
164 | lock->stat.prevent_suspend_time = ktime_add( | ||
165 | lock->stat.prevent_suspend_time, duration); | ||
166 | lock->flags &= ~WAKE_LOCK_PREVENTING_SUSPEND; | ||
167 | } | ||
168 | } | ||
52 | 169 | ||
53 | mutex_unlock(&wakelocks_lock); | 170 | static void update_sleep_wait_stats_locked(int done) |
54 | return (str - buf); | 171 | { |
172 | struct wake_lock *lock; | ||
173 | ktime_t now, etime, elapsed, add; | ||
174 | int expired; | ||
175 | |||
176 | now = ktime_get(); | ||
177 | elapsed = ktime_sub(now, last_sleep_time_update); | ||
178 | list_for_each_entry(lock, &active_wake_locks[WAKE_LOCK_SUSPEND], link) { | ||
179 | expired = get_expired_time(lock, &etime); | ||
180 | if (lock->flags & WAKE_LOCK_PREVENTING_SUSPEND) { | ||
181 | if (expired) | ||
182 | add = ktime_sub(etime, last_sleep_time_update); | ||
183 | else | ||
184 | add = elapsed; | ||
185 | lock->stat.prevent_suspend_time = ktime_add( | ||
186 | lock->stat.prevent_suspend_time, add); | ||
187 | } | ||
188 | if (done || expired) | ||
189 | lock->flags &= ~WAKE_LOCK_PREVENTING_SUSPEND; | ||
190 | else | ||
191 | lock->flags |= WAKE_LOCK_PREVENTING_SUSPEND; | ||
192 | } | ||
193 | last_sleep_time_update = now; | ||
55 | } | 194 | } |
195 | #endif | ||
56 | 196 | ||
57 | #if CONFIG_PM_WAKELOCKS_LIMIT > 0 | ||
58 | static unsigned int number_of_wakelocks; | ||
59 | 197 | ||
60 | static inline bool wakelocks_limit_exceeded(void) | 198 | static void expire_wake_lock(struct wake_lock *lock) |
61 | { | 199 | { |
62 | return number_of_wakelocks > CONFIG_PM_WAKELOCKS_LIMIT; | 200 | #ifdef CONFIG_WAKELOCK_STAT |
201 | wake_unlock_stat_locked(lock, 1); | ||
202 | #endif | ||
203 | lock->flags &= ~(WAKE_LOCK_ACTIVE | WAKE_LOCK_AUTO_EXPIRE); | ||
204 | list_del(&lock->link); | ||
205 | list_add(&lock->link, &inactive_locks); | ||
206 | if (debug_mask & (DEBUG_WAKE_LOCK | DEBUG_EXPIRE)) | ||
207 | pr_info("expired wake lock %s\n", lock->name); | ||
63 | } | 208 | } |
64 | 209 | ||
65 | static inline void increment_wakelocks_number(void) | 210 | /* Caller must acquire the list_lock spinlock */ |
211 | static void print_active_locks(int type) | ||
66 | { | 212 | { |
67 | number_of_wakelocks++; | 213 | struct wake_lock *lock; |
214 | bool print_expired = true; | ||
215 | |||
216 | BUG_ON(type >= WAKE_LOCK_TYPE_COUNT); | ||
217 | list_for_each_entry(lock, &active_wake_locks[type], link) { | ||
218 | if (lock->flags & WAKE_LOCK_AUTO_EXPIRE) { | ||
219 | long timeout = lock->expires - jiffies; | ||
220 | if (timeout > 0) | ||
221 | pr_info("active wake lock %s, time left %ld\n", | ||
222 | lock->name, timeout); | ||
223 | else if (print_expired) | ||
224 | pr_info("wake lock %s, expired\n", lock->name); | ||
225 | } else { | ||
226 | pr_info("active wake lock %s\n", lock->name); | ||
227 | if (!(debug_mask & DEBUG_EXPIRE)) | ||
228 | print_expired = false; | ||
229 | } | ||
230 | } | ||
68 | } | 231 | } |
69 | 232 | ||
70 | static inline void decrement_wakelocks_number(void) | 233 | static long has_wake_lock_locked(int type) |
71 | { | 234 | { |
72 | number_of_wakelocks--; | 235 | struct wake_lock *lock, *n; |
236 | long max_timeout = 0; | ||
237 | |||
238 | BUG_ON(type >= WAKE_LOCK_TYPE_COUNT); | ||
239 | list_for_each_entry_safe(lock, n, &active_wake_locks[type], link) { | ||
240 | if (lock->flags & WAKE_LOCK_AUTO_EXPIRE) { | ||
241 | long timeout = lock->expires - jiffies; | ||
242 | if (timeout <= 0) | ||
243 | expire_wake_lock(lock); | ||
244 | else if (timeout > max_timeout) | ||
245 | max_timeout = timeout; | ||
246 | } else | ||
247 | return -1; | ||
248 | } | ||
249 | return max_timeout; | ||
73 | } | 250 | } |
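has_wake_lock_locked() reduces the whole active list to a single verdict: -1 if any holder has no timeout (suspend is blocked indefinitely), otherwise the largest remaining timeout, retiring already-expired locks along the way; 0 therefore means nothing blocks suspend. The same fold over a plain array (illustrative types):

	#include <stdio.h>

	struct lock { int has_timeout; long remaining; };	/* jiffies left */

	static long has_wake_lock(const struct lock *v, int n)
	{
		long max_timeout = 0;

		for (int i = 0; i < n; i++) {
			if (!v[i].has_timeout)
				return -1;		/* indefinite holder */
			if (v[i].remaining <= 0)
				continue;		/* kernel expires it here */
			if (v[i].remaining > max_timeout)
				max_timeout = v[i].remaining;
		}
		return max_timeout;
	}

	int main(void)
	{
		struct lock v[] = { {1, 30}, {1, 120}, {1, -5} };

		printf("%ld\n", has_wake_lock(v, 3));	/* prints 120 */
		return 0;
	}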
74 | #else /* CONFIG_PM_WAKELOCKS_LIMIT = 0 */ | ||
75 | static inline bool wakelocks_limit_exceeded(void) { return false; } | ||
76 | static inline void increment_wakelocks_number(void) {} | ||
77 | static inline void decrement_wakelocks_number(void) {} | ||
78 | #endif /* CONFIG_PM_WAKELOCKS_LIMIT */ | ||
79 | 251 | ||
80 | #ifdef CONFIG_PM_WAKELOCKS_GC | 252 | long has_wake_lock(int type) |
81 | #define WL_GC_COUNT_MAX 100 | ||
82 | #define WL_GC_TIME_SEC 300 | ||
83 | |||
84 | static LIST_HEAD(wakelocks_lru_list); | ||
85 | static unsigned int wakelocks_gc_count; | ||
86 | |||
87 | static inline void wakelocks_lru_add(struct wakelock *wl) | ||
88 | { | 253 | { |
89 | list_add(&wl->lru, &wakelocks_lru_list); | 254 | long ret; |
255 | unsigned long irqflags; | ||
256 | spin_lock_irqsave(&list_lock, irqflags); | ||
257 | ret = has_wake_lock_locked(type); | ||
258 | if (ret && (debug_mask & DEBUG_WAKEUP) && type == WAKE_LOCK_SUSPEND) | ||
259 | print_active_locks(type); | ||
260 | spin_unlock_irqrestore(&list_lock, irqflags); | ||
261 | return ret; | ||
90 | } | 262 | } |
91 | 263 | ||
92 | static inline void wakelocks_lru_most_recent(struct wakelock *wl) | 264 | static void suspend_backoff(void) |
93 | { | 265 | { |
94 | list_move(&wl->lru, &wakelocks_lru_list); | 266 | pr_info("suspend: too many immediate wakeups, back off\n"); |
267 | wake_lock_timeout(&suspend_backoff_lock, | ||
268 | msecs_to_jiffies(SUSPEND_BACKOFF_INTERVAL)); | ||
95 | } | 269 | } |
96 | 270 | ||
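suspend_backoff() above is armed by the suspend() worker further down: every suspend that bounces back within about a second bumps a counter, and after SUSPEND_BACKOFF_THRESHOLD consecutive short sleeps a 10-second wake lock (SUSPEND_BACKOFF_INTERVAL) pauses the retry loop. Stripped-down shape of that policy:

	#include <stdio.h>

	#define BACKOFF_THRESHOLD 10

	static unsigned short_count;

	/* returns nonzero when the caller should take the backoff wake lock */
	static int note_suspend_duration(long seconds_asleep)
	{
		if (seconds_asleep <= 1) {
			if (++short_count == BACKOFF_THRESHOLD) {
				short_count = 0;
				return 1;
			}
		} else {
			short_count = 0;	/* a real sleep resets the run */
		}
		return 0;
	}

	int main(void)
	{
		for (int i = 0; i < 12; i++)
			if (note_suspend_duration(0))
				printf("back off after wakeup storm (i=%d)\n", i);
		return 0;
	}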
97 | static void wakelocks_gc(void) | 271 | static void suspend(struct work_struct *work) |
98 | { | 272 | { |
99 | struct wakelock *wl, *aux; | 273 | int ret; |
100 | ktime_t now; | 274 | int entry_event_num; |
275 | struct timespec ts_entry, ts_exit; | ||
101 | 276 | ||
102 | if (++wakelocks_gc_count <= WL_GC_COUNT_MAX) | 277 | if (has_wake_lock(WAKE_LOCK_SUSPEND)) { |
278 | if (debug_mask & DEBUG_SUSPEND) | ||
279 | pr_info("suspend: abort suspend\n"); | ||
103 | return; | 280 | return; |
281 | } | ||
104 | 282 | ||
105 | now = ktime_get(); | 283 | entry_event_num = current_event_num; |
106 | list_for_each_entry_safe_reverse(wl, aux, &wakelocks_lru_list, lru) { | 284 | sys_sync(); |
107 | u64 idle_time_ns; | 285 | if (debug_mask & DEBUG_SUSPEND) |
108 | bool active; | 286 | pr_info("suspend: enter suspend\n"); |
109 | 287 | getnstimeofday(&ts_entry); | |
110 | spin_lock_irq(&wl->ws.lock); | 288 | ret = pm_suspend(requested_suspend_state); |
111 | idle_time_ns = ktime_to_ns(ktime_sub(now, wl->ws.last_time)); | 289 | getnstimeofday(&ts_exit); |
112 | active = wl->ws.active; | 290 | |
113 | spin_unlock_irq(&wl->ws.lock); | 291 | if (debug_mask & DEBUG_EXIT_SUSPEND) { |
114 | 292 | struct rtc_time tm; | |
115 | if (idle_time_ns < ((u64)WL_GC_TIME_SEC * NSEC_PER_SEC)) | 293 | rtc_time_to_tm(ts_exit.tv_sec, &tm); |
116 | break; | 294 | pr_info("suspend: exit suspend, ret = %d " |
117 | 295 | "(%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n", ret, | |
118 | if (!active) { | 296 | tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, |
119 | wakeup_source_remove(&wl->ws); | 297 | tm.tm_hour, tm.tm_min, tm.tm_sec, ts_exit.tv_nsec); |
120 | rb_erase(&wl->node, &wakelocks_tree); | ||
121 | list_del(&wl->lru); | ||
122 | kfree(wl->name); | ||
123 | kfree(wl); | ||
124 | decrement_wakelocks_number(); | ||
125 | } | ||
126 | } | 298 | } |
127 | wakelocks_gc_count = 0; | 299 | |
128 | } | 300 | if (ts_exit.tv_sec - ts_entry.tv_sec <= 1) { |
129 | #else /* !CONFIG_PM_WAKELOCKS_GC */ | 301 | ++suspend_short_count; |
130 | static inline void wakelocks_lru_add(struct wakelock *wl) {} | 302 | |
131 | static inline void wakelocks_lru_most_recent(struct wakelock *wl) {} | 303 | if (suspend_short_count == SUSPEND_BACKOFF_THRESHOLD) { |
132 | static inline void wakelocks_gc(void) {} | 304 | suspend_backoff(); |
133 | #endif /* !CONFIG_PM_WAKELOCKS_GC */ | 305 | suspend_short_count = 0; |
134 | |||
135 | static struct wakelock *wakelock_lookup_add(const char *name, size_t len, | ||
136 | bool add_if_not_found) | ||
137 | { | ||
138 | struct rb_node **node = &wakelocks_tree.rb_node; | ||
139 | struct rb_node *parent = *node; | ||
140 | struct wakelock *wl; | ||
141 | |||
142 | while (*node) { | ||
143 | int diff; | ||
144 | |||
145 | parent = *node; | ||
146 | wl = rb_entry(*node, struct wakelock, node); | ||
147 | diff = strncmp(name, wl->name, len); | ||
148 | if (diff == 0) { | ||
149 | if (wl->name[len]) | ||
150 | diff = -1; | ||
151 | else | ||
152 | return wl; | ||
153 | } | 306 | } |
154 | if (diff < 0) | 307 | } else { |
155 | node = &(*node)->rb_left; | 308 | suspend_short_count = 0; |
156 | else | ||
157 | node = &(*node)->rb_right; | ||
158 | } | 309 | } |
159 | if (!add_if_not_found) | ||
160 | return ERR_PTR(-EINVAL); | ||
161 | 310 | ||
162 | if (wakelocks_limit_exceeded()) | 311 | if (current_event_num == entry_event_num) { |
163 | return ERR_PTR(-ENOSPC); | 312 | if (debug_mask & DEBUG_SUSPEND) |
313 | pr_info("suspend: pm_suspend returned with no event\n"); | ||
314 | wake_lock_timeout(&unknown_wakeup, HZ / 2); | ||
315 | } | ||
316 | } | ||
317 | static DECLARE_WORK(suspend_work, suspend); | ||
164 | 318 | ||
165 | /* Not found, we have to add a new one. */ | 319 | static void expire_wake_locks(unsigned long data) |
166 | wl = kzalloc(sizeof(*wl), GFP_KERNEL); | 320 | { |
167 | if (!wl) | 321 | long has_lock; |
168 | return ERR_PTR(-ENOMEM); | 322 | unsigned long irqflags; |
323 | if (debug_mask & DEBUG_EXPIRE) | ||
324 | pr_info("expire_wake_locks: start\n"); | ||
325 | spin_lock_irqsave(&list_lock, irqflags); | ||
326 | if (debug_mask & DEBUG_SUSPEND) | ||
327 | print_active_locks(WAKE_LOCK_SUSPEND); | ||
328 | has_lock = has_wake_lock_locked(WAKE_LOCK_SUSPEND); | ||
329 | if (debug_mask & DEBUG_EXPIRE) | ||
330 | pr_info("expire_wake_locks: done, has_lock %ld\n", has_lock); | ||
331 | if (has_lock == 0) | ||
332 | queue_work(suspend_work_queue, &suspend_work); | ||
333 | spin_unlock_irqrestore(&list_lock, irqflags); | ||
334 | } | ||
335 | static DEFINE_TIMER(expire_timer, expire_wake_locks, 0, 0); | ||
169 | 336 | ||
170 | wl->name = kstrndup(name, len, GFP_KERNEL); | 337 | static int power_suspend_late(struct device *dev) |
171 | if (!wl->name) { | 338 | { |
172 | kfree(wl); | 339 | int ret = has_wake_lock(WAKE_LOCK_SUSPEND) ? -EAGAIN : 0; |
173 | return ERR_PTR(-ENOMEM); | 340 | #ifdef CONFIG_WAKELOCK_STAT |
174 | } | 341 | wait_for_wakeup = !ret; |
175 | wl->ws.name = wl->name; | 342 | #endif |
176 | wakeup_source_add(&wl->ws); | 343 | if (debug_mask & DEBUG_SUSPEND) |
177 | rb_link_node(&wl->node, parent, node); | 344 | pr_info("power_suspend_late return %d\n", ret); |
178 | rb_insert_color(&wl->node, &wakelocks_tree); | 345 | return ret; |
179 | wakelocks_lru_add(wl); | ||
180 | increment_wakelocks_number(); | ||
181 | return wl; | ||
182 | } | 346 | } |
183 | 347 | ||
184 | int pm_wake_lock(const char *buf) | 348 | static struct dev_pm_ops power_driver_pm_ops = { |
349 | .suspend_noirq = power_suspend_late, | ||
350 | }; | ||
351 | |||
352 | static struct platform_driver power_driver = { | ||
353 | .driver.name = "power", | ||
354 | .driver.pm = &power_driver_pm_ops, | ||
355 | }; | ||
356 | static struct platform_device power_device = { | ||
357 | .name = "power", | ||
358 | }; | ||
359 | |||
360 | void wake_lock_init(struct wake_lock *lock, int type, const char *name) | ||
185 | { | 361 | { |
186 | const char *str = buf; | 362 | unsigned long irqflags = 0; |
187 | struct wakelock *wl; | 363 | |
188 | u64 timeout_ns = 0; | 364 | if (name) |
189 | size_t len; | 365 | lock->name = name; |
190 | int ret = 0; | 366 | BUG_ON(!lock->name); |
191 | 367 | ||
192 | if (!capable(CAP_BLOCK_SUSPEND)) | 368 | if (debug_mask & DEBUG_WAKE_LOCK) |
193 | return -EPERM; | 369 | pr_info("wake_lock_init name=%s\n", lock->name); |
194 | 370 | #ifdef CONFIG_WAKELOCK_STAT | |
195 | while (*str && !isspace(*str)) | 371 | lock->stat.count = 0; |
196 | str++; | 372 | lock->stat.expire_count = 0; |
197 | 373 | lock->stat.wakeup_count = 0; | |
198 | len = str - buf; | 374 | lock->stat.total_time = ktime_set(0, 0); |
199 | if (!len) | 375 | lock->stat.prevent_suspend_time = ktime_set(0, 0); |
200 | return -EINVAL; | 376 | lock->stat.max_time = ktime_set(0, 0); |
201 | 377 | lock->stat.last_time = ktime_set(0, 0); | |
202 | if (*str && *str != '\n') { | 378 | #endif |
203 | /* Find out if there's a valid timeout string appended. */ | 379 | lock->flags = (type & WAKE_LOCK_TYPE_MASK) | WAKE_LOCK_INITIALIZED; |
204 | ret = kstrtou64(skip_spaces(str), 10, &timeout_ns); | ||
205 | if (ret) | ||
206 | return -EINVAL; | ||
207 | } | ||
208 | 380 | ||
209 | mutex_lock(&wakelocks_lock); | 381 | INIT_LIST_HEAD(&lock->link); |
382 | spin_lock_irqsave(&list_lock, irqflags); | ||
383 | list_add(&lock->link, &inactive_locks); | ||
384 | spin_unlock_irqrestore(&list_lock, irqflags); | ||
385 | } | ||
386 | EXPORT_SYMBOL(wake_lock_init); | ||
210 | 387 | ||
211 | wl = wakelock_lookup_add(buf, len, true); | 388 | void wake_lock_destroy(struct wake_lock *lock) |
212 | if (IS_ERR(wl)) { | 389 | { |
213 | ret = PTR_ERR(wl); | 390 | unsigned long irqflags; |
214 | goto out; | 391 | if (debug_mask & DEBUG_WAKE_LOCK) |
392 | pr_info("wake_lock_destroy name=%s\n", lock->name); | ||
393 | spin_lock_irqsave(&list_lock, irqflags); | ||
394 | lock->flags &= ~WAKE_LOCK_INITIALIZED; | ||
395 | #ifdef CONFIG_WAKELOCK_STAT | ||
396 | if (lock->stat.count) { | ||
397 | deleted_wake_locks.stat.count += lock->stat.count; | ||
398 | deleted_wake_locks.stat.expire_count += lock->stat.expire_count; | ||
399 | deleted_wake_locks.stat.total_time = | ||
400 | ktime_add(deleted_wake_locks.stat.total_time, | ||
401 | lock->stat.total_time); | ||
402 | deleted_wake_locks.stat.prevent_suspend_time = | ||
403 | ktime_add(deleted_wake_locks.stat.prevent_suspend_time, | ||
404 | lock->stat.prevent_suspend_time); | ||
405 | deleted_wake_locks.stat.max_time = | ||
406 | ktime_add(deleted_wake_locks.stat.max_time, | ||
407 | lock->stat.max_time); | ||
215 | } | 408 | } |
216 | if (timeout_ns) { | 409 | #endif |
217 | u64 timeout_ms = timeout_ns + NSEC_PER_MSEC - 1; | 410 | list_del(&lock->link); |
411 | spin_unlock_irqrestore(&list_lock, irqflags); | ||
412 | } | ||
413 | EXPORT_SYMBOL(wake_lock_destroy); | ||
218 | 414 | ||
219 | do_div(timeout_ms, NSEC_PER_MSEC); | 415 | static void wake_lock_internal( |
220 | __pm_wakeup_event(&wl->ws, timeout_ms); | 416 | struct wake_lock *lock, long timeout, int has_timeout) |
417 | { | ||
418 | int type; | ||
419 | unsigned long irqflags; | ||
420 | long expire_in; | ||
421 | |||
422 | spin_lock_irqsave(&list_lock, irqflags); | ||
423 | type = lock->flags & WAKE_LOCK_TYPE_MASK; | ||
424 | BUG_ON(type >= WAKE_LOCK_TYPE_COUNT); | ||
425 | BUG_ON(!(lock->flags & WAKE_LOCK_INITIALIZED)); | ||
426 | #ifdef CONFIG_WAKELOCK_STAT | ||
427 | if (type == WAKE_LOCK_SUSPEND && wait_for_wakeup) { | ||
428 | if (debug_mask & DEBUG_WAKEUP) | ||
429 | pr_info("wakeup wake lock: %s\n", lock->name); | ||
430 | wait_for_wakeup = 0; | ||
431 | lock->stat.wakeup_count++; | ||
432 | } | ||
433 | if ((lock->flags & WAKE_LOCK_AUTO_EXPIRE) && | ||
434 | (long)(lock->expires - jiffies) <= 0) { | ||
435 | wake_unlock_stat_locked(lock, 0); | ||
436 | lock->stat.last_time = ktime_get(); | ||
437 | } | ||
438 | #endif | ||
439 | if (!(lock->flags & WAKE_LOCK_ACTIVE)) { | ||
440 | lock->flags |= WAKE_LOCK_ACTIVE; | ||
441 | #ifdef CONFIG_WAKELOCK_STAT | ||
442 | lock->stat.last_time = ktime_get(); | ||
443 | #endif | ||
444 | } | ||
445 | list_del(&lock->link); | ||
446 | if (has_timeout) { | ||
447 | if (debug_mask & DEBUG_WAKE_LOCK) | ||
448 | pr_info("wake_lock: %s, type %d, timeout %ld.%03lu\n", | ||
449 | lock->name, type, timeout / HZ, | ||
450 | (timeout % HZ) * MSEC_PER_SEC / HZ); | ||
451 | lock->expires = jiffies + timeout; | ||
452 | lock->flags |= WAKE_LOCK_AUTO_EXPIRE; | ||
453 | list_add_tail(&lock->link, &active_wake_locks[type]); | ||
221 | } else { | 454 | } else { |
222 | __pm_stay_awake(&wl->ws); | 455 | if (debug_mask & DEBUG_WAKE_LOCK) |
456 | pr_info("wake_lock: %s, type %d\n", lock->name, type); | ||
457 | lock->expires = LONG_MAX; | ||
458 | lock->flags &= ~WAKE_LOCK_AUTO_EXPIRE; | ||
459 | list_add(&lock->link, &active_wake_locks[type]); | ||
460 | } | ||
461 | if (type == WAKE_LOCK_SUSPEND) { | ||
462 | current_event_num++; | ||
463 | #ifdef CONFIG_WAKELOCK_STAT | ||
464 | if (lock == &main_wake_lock) | ||
465 | update_sleep_wait_stats_locked(1); | ||
466 | else if (!wake_lock_active(&main_wake_lock)) | ||
467 | update_sleep_wait_stats_locked(0); | ||
468 | #endif | ||
469 | if (has_timeout) | ||
470 | expire_in = has_wake_lock_locked(type); | ||
471 | else | ||
472 | expire_in = -1; | ||
473 | if (expire_in > 0) { | ||
474 | if (debug_mask & DEBUG_EXPIRE) | ||
475 | pr_info("wake_lock: %s, start expire timer, " | ||
476 | "%ld\n", lock->name, expire_in); | ||
477 | mod_timer(&expire_timer, jiffies + expire_in); | ||
478 | } else { | ||
479 | if (del_timer(&expire_timer)) | ||
480 | if (debug_mask & DEBUG_EXPIRE) | ||
481 | pr_info("wake_lock: %s, stop expire timer\n", | ||
482 | lock->name); | ||
483 | if (expire_in == 0) | ||
484 | queue_work(suspend_work_queue, &suspend_work); | ||
485 | } | ||
223 | } | 486 | } |
487 | spin_unlock_irqrestore(&list_lock, irqflags); | ||
488 | } | ||
224 | 489 | ||
225 | wakelocks_lru_most_recent(wl); | 490 | void wake_lock(struct wake_lock *lock) |
491 | { | ||
492 | wake_lock_internal(lock, 0, 0); | ||
493 | } | ||
494 | EXPORT_SYMBOL(wake_lock); | ||
226 | 495 | ||
227 | out: | 496 | void wake_lock_timeout(struct wake_lock *lock, long timeout) |
228 | mutex_unlock(&wakelocks_lock); | 497 | { |
229 | return ret; | 498 | wake_lock_internal(lock, timeout, 1); |
230 | } | 499 | } |
500 | EXPORT_SYMBOL(wake_lock_timeout); | ||
231 | 501 | ||
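pm_wake_lock() (left column) parses what userspace writes to /sys/power/wake_lock: the name runs up to the first whitespace, an optional decimal timeout in nanoseconds may follow, and the timeout is rounded up to whole milliseconds before being handed to __pm_wakeup_event(). The same parse in userspace C:

	#include <ctype.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		const char *buf = "mylock 1500000\n";	/* 1.5 ms, in ns */
		const char *str = buf;
		uint64_t timeout_ns = 0;

		while (*str && !isspace((unsigned char)*str))
			str++;				/* name is buf[0..len) */
		size_t len = (size_t)(str - buf);

		if (*str && *str != '\n')
			timeout_ns = strtoull(str, NULL, 10);	/* skips spaces */

		/* round up, as (ns + NSEC_PER_MSEC - 1) / NSEC_PER_MSEC */
		uint64_t timeout_ms = (timeout_ns + 999999) / 1000000;

		printf("name '%.*s', timeout %llu ms\n", (int)len, buf,
		       (unsigned long long)timeout_ms);		/* -> 2 ms */
		return 0;
	}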
232 | int pm_wake_unlock(const char *buf) | 502 | void wake_unlock(struct wake_lock *lock) |
233 | { | 503 | { |
234 | struct wakelock *wl; | 504 | int type; |
235 | size_t len; | 505 | unsigned long irqflags; |
236 | int ret = 0; | 506 | spin_lock_irqsave(&list_lock, irqflags); |
507 | type = lock->flags & WAKE_LOCK_TYPE_MASK; | ||
508 | #ifdef CONFIG_WAKELOCK_STAT | ||
509 | wake_unlock_stat_locked(lock, 0); | ||
510 | #endif | ||
511 | if (debug_mask & DEBUG_WAKE_LOCK) | ||
512 | pr_info("wake_unlock: %s\n", lock->name); | ||
513 | lock->flags &= ~(WAKE_LOCK_ACTIVE | WAKE_LOCK_AUTO_EXPIRE); | ||
514 | list_del(&lock->link); | ||
515 | list_add(&lock->link, &inactive_locks); | ||
516 | if (type == WAKE_LOCK_SUSPEND) { | ||
517 | long has_lock = has_wake_lock_locked(type); | ||
518 | if (has_lock > 0) { | ||
519 | if (debug_mask & DEBUG_EXPIRE) | ||
520 | pr_info("wake_unlock: %s, start expire timer, " | ||
521 | "%ld\n", lock->name, has_lock); | ||
522 | mod_timer(&expire_timer, jiffies + has_lock); | ||
523 | } else { | ||
524 | if (del_timer(&expire_timer)) | ||
525 | if (debug_mask & DEBUG_EXPIRE) | ||
526 | pr_info("wake_unlock: %s, stop expire " | ||
527 | "timer\n", lock->name); | ||
528 | if (has_lock == 0) | ||
529 | queue_work(suspend_work_queue, &suspend_work); | ||
530 | } | ||
531 | if (lock == &main_wake_lock) { | ||
532 | if (debug_mask & DEBUG_SUSPEND) | ||
533 | print_active_locks(WAKE_LOCK_SUSPEND); | ||
534 | #ifdef CONFIG_WAKELOCK_STAT | ||
535 | update_sleep_wait_stats_locked(0); | ||
536 | #endif | ||
537 | } | ||
538 | } | ||
539 | spin_unlock_irqrestore(&list_lock, irqflags); | ||
540 | } | ||
541 | EXPORT_SYMBOL(wake_unlock); | ||
237 | 542 | ||
238 | if (!capable(CAP_BLOCK_SUSPEND)) | 543 | int wake_lock_active(struct wake_lock *lock) |
239 | return -EPERM; | 544 | { |
545 | return !!(lock->flags & WAKE_LOCK_ACTIVE); | ||
546 | } | ||
547 | EXPORT_SYMBOL(wake_lock_active); | ||
548 | |||
549 | static int wakelock_stats_open(struct inode *inode, struct file *file) | ||
550 | { | ||
551 | return single_open(file, wakelock_stats_show, NULL); | ||
552 | } | ||
240 | 553 | ||
241 | len = strlen(buf); | 554 | static const struct file_operations wakelock_stats_fops = { |
242 | if (!len) | 555 | .owner = THIS_MODULE, |
243 | return -EINVAL; | 556 | .open = wakelock_stats_open, |
557 | .read = seq_read, | ||
558 | .llseek = seq_lseek, | ||
559 | .release = single_release, | ||
560 | }; | ||
244 | 561 | ||
245 | if (buf[len-1] == '\n') | 562 | static int __init wakelocks_init(void) |
246 | len--; | 563 | { |
564 | int ret; | ||
565 | int i; | ||
247 | 566 | ||
248 | if (!len) | 567 | for (i = 0; i < ARRAY_SIZE(active_wake_locks); i++) |
249 | return -EINVAL; | 568 | INIT_LIST_HEAD(&active_wake_locks[i]); |
250 | 569 | ||
251 | mutex_lock(&wakelocks_lock); | 570 | #ifdef CONFIG_WAKELOCK_STAT |
571 | wake_lock_init(&deleted_wake_locks, WAKE_LOCK_SUSPEND, | ||
572 | "deleted_wake_locks"); | ||
573 | #endif | ||
574 | wake_lock_init(&main_wake_lock, WAKE_LOCK_SUSPEND, "main"); | ||
575 | wake_lock(&main_wake_lock); | ||
576 | wake_lock_init(&unknown_wakeup, WAKE_LOCK_SUSPEND, "unknown_wakeups"); | ||
577 | wake_lock_init(&suspend_backoff_lock, WAKE_LOCK_SUSPEND, | ||
578 | "suspend_backoff"); | ||
579 | |||
580 | ret = platform_device_register(&power_device); | ||
581 | if (ret) { | ||
582 | pr_err("wakelocks_init: platform_device_register failed\n"); | ||
583 | goto err_platform_device_register; | ||
584 | } | ||
585 | ret = platform_driver_register(&power_driver); | ||
586 | if (ret) { | ||
587 | pr_err("wakelocks_init: platform_driver_register failed\n"); | ||
588 | goto err_platform_driver_register; | ||
589 | } | ||
252 | 590 | ||
253 | wl = wakelock_lookup_add(buf, len, false); | 591 | suspend_work_queue = create_singlethread_workqueue("suspend"); |
254 | if (IS_ERR(wl)) { | 592 | if (suspend_work_queue == NULL) { |
255 | ret = PTR_ERR(wl); | 593 | ret = -ENOMEM; |
256 | goto out; | 594 | goto err_suspend_work_queue; |
257 | } | 595 | } |
258 | __pm_relax(&wl->ws); | ||
259 | 596 | ||
260 | wakelocks_lru_most_recent(wl); | 597 | #ifdef CONFIG_WAKELOCK_STAT |
261 | wakelocks_gc(); | 598 | proc_create("wakelocks", S_IRUGO, NULL, &wakelock_stats_fops); |
599 | #endif | ||
262 | 600 | ||
263 | out: | 601 | return 0; |
264 | mutex_unlock(&wakelocks_lock); | 602 | |
603 | err_suspend_work_queue: | ||
604 | platform_driver_unregister(&power_driver); | ||
605 | err_platform_driver_register: | ||
606 | platform_device_unregister(&power_device); | ||
607 | err_platform_device_register: | ||
608 | wake_lock_destroy(&suspend_backoff_lock); | ||
609 | wake_lock_destroy(&unknown_wakeup); | ||
610 | wake_lock_destroy(&main_wake_lock); | ||
611 | #ifdef CONFIG_WAKELOCK_STAT | ||
612 | wake_lock_destroy(&deleted_wake_locks); | ||
613 | #endif | ||
265 | return ret; | 614 | return ret; |
266 | } | 615 | } |
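wakelocks_init() (right column) closes with the kernel's standard goto-unwind ladder: each failure label undoes exactly the steps that succeeded before it, in reverse registration order, so a partial init never leaks a device, driver or workqueue. The pattern reduced to three steps, with stubs in place of the platform and workqueue calls:

	#include <stdio.h>

	static int  register_device(void)   { return 0; }	/* stubs for the */
	static void unregister_device(void) { puts("undo device"); }	/* sketch */
	static int  register_driver(void)   { return 0; }
	static void unregister_driver(void) { puts("undo driver"); }
	static int  create_queue(void)      { return -1; }	/* simulate failure */

	static int init(void)
	{
		int ret;

		ret = register_device();
		if (ret)
			goto err_device;
		ret = register_driver();
		if (ret)
			goto err_driver;
		ret = create_queue();
		if (ret)
			goto err_queue;
		return 0;

	err_queue:
		unregister_driver();	/* unwind in reverse order */
	err_driver:
		unregister_device();
	err_device:
		return ret;
	}

	int main(void)
	{
		return init() ? 1 : 0;	/* prints "undo driver", "undo device" */
	}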
616 | |||
617 | static void __exit wakelocks_exit(void) | ||
618 | { | ||
619 | #ifdef CONFIG_WAKELOCK_STAT | ||
620 | remove_proc_entry("wakelocks", NULL); | ||
621 | #endif | ||
622 | destroy_workqueue(suspend_work_queue); | ||
623 | platform_driver_unregister(&power_driver); | ||
624 | platform_device_unregister(&power_device); | ||
625 | wake_lock_destroy(&suspend_backoff_lock); | ||
626 | wake_lock_destroy(&unknown_wakeup); | ||
627 | wake_lock_destroy(&main_wake_lock); | ||
628 | #ifdef CONFIG_WAKELOCK_STAT | ||
629 | wake_lock_destroy(&deleted_wake_locks); | ||
630 | #endif | ||
631 | } | ||
632 | |||
633 | core_initcall(wakelocks_init); | ||
634 | module_exit(wakelocks_exit); | ||