author		Andrea Bastoni <bastoni@cs.unc.edu>	2011-08-27 09:43:54 -0400
committer	Andrea Bastoni <bastoni@cs.unc.edu>	2011-08-27 10:06:11 -0400
commit		7b1bb388bc879ffcc6c69b567816d5c354afe42b (patch)
tree		5a217fdfb0b5e5a327bdcd624506337c1ae1fe32 /kernel/power
parent		7d754596756240fa918b94cd0c3011c77a638987 (diff)
parent		02f8c6aee8df3cdc935e9bdd4f2d020306035dbe (diff)
Merge 'Linux v3.0' into Litmus
Some notes:

* Litmus^RT scheduling class is the topmost scheduling class
  (above stop_sched_class).
* scheduler_ipi() function (e.g., in smp_reschedule_interrupt())
  may increase IPI latencies.
* Added path into schedule() to quickly re-evaluate scheduling
  decision without becoming preemptive again. This used to be
  a standard path before the removal of BKL.

Conflicts:
	Makefile
	arch/arm/kernel/calls.S
	arch/arm/kernel/smp.c
	arch/x86/include/asm/unistd_32.h
	arch/x86/kernel/smp.c
	arch/x86/kernel/syscall_table_32.S
	include/linux/hrtimer.h
	kernel/printk.c
	kernel/sched.c
	kernel/sched_fair.c
Diffstat (limited to 'kernel/power')
-rw-r--r--	kernel/power/Kconfig		| 253
-rw-r--r--	kernel/power/Makefile		|   5
-rw-r--r--	kernel/power/block_io.c		|   2
-rw-r--r--	kernel/power/hibernate.c	| 331
-rw-r--r--	kernel/power/main.c		|  35
-rw-r--r--	kernel/power/nvs.c		| 136
-rw-r--r--	kernel/power/power.h		|  14
-rw-r--r--	kernel/power/process.c		|  23
-rw-r--r--	kernel/power/snapshot.c		|  69
-rw-r--r--	kernel/power/suspend.c		|  21
-rw-r--r--	kernel/power/swap.c		| 336
-rw-r--r--	kernel/power/user.c		|  13
12 files changed, 755 insertions, 483 deletions
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index ca6066a6952e..87f4d24b55b0 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -1,128 +1,12 @@
-config PM
-	bool "Power Management support"
-	depends on !IA64_HP_SIM
-	---help---
-	  "Power Management" means that parts of your computer are shut
-	  off or put into a power conserving "sleep" mode if they are not
-	  being used. There are two competing standards for doing this: APM
-	  and ACPI. If you want to use either one, say Y here and then also
-	  to the requisite support below.
-
-	  Power Management is most important for battery powered laptop
-	  computers; if you have a laptop, check out the Linux Laptop home
-	  page on the WWW at <http://www.linux-on-laptops.com/> or
-	  Tuxmobil - Linux on Mobile Computers at <http://www.tuxmobil.org/>
-	  and the Battery Powered Linux mini-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
-
-	  Note that, even if you say N here, Linux on the x86 architecture
-	  will issue the hlt instruction if nothing is to be done, thereby
-	  sending the processor to sleep and saving power.
-
-config PM_DEBUG
-	bool "Power Management Debug Support"
-	depends on PM
-	---help---
-	  This option enables various debugging support in the Power Management
-	  code. This is helpful when debugging and reporting PM bugs, like
-	  suspend support.
-
-config PM_ADVANCED_DEBUG
-	bool "Extra PM attributes in sysfs for low-level debugging/testing"
-	depends on PM_DEBUG
-	default n
-	---help---
-	  Add extra sysfs attributes allowing one to access some Power Management
-	  fields of device objects from user space. If you are not a kernel
-	  developer interested in debugging/testing Power Management, say "no".
-
-config PM_VERBOSE
-	bool "Verbose Power Management debugging"
-	depends on PM_DEBUG
-	default n
-	---help---
-	  This option enables verbose messages from the Power Management code.
-
-config CAN_PM_TRACE
-	def_bool y
-	depends on PM_DEBUG && PM_SLEEP && EXPERIMENTAL
-
-config PM_TRACE
-	bool
-	help
-	  This enables code to save the last PM event point across
-	  reboot. The architecture needs to support this, x86 for
-	  example does by saving things in the RTC, see below.
-
-	  The architecture specific code must provide the extern
-	  functions from <linux/resume-trace.h> as well as the
-	  <asm/resume-trace.h> header with a TRACE_RESUME() macro.
-
-	  The way the information is presented is architecture-
-	  dependent, x86 will print the information during a
-	  late_initcall.
-
-config PM_TRACE_RTC
-	bool "Suspend/resume event tracing"
-	depends on CAN_PM_TRACE
-	depends on X86
-	select PM_TRACE
-	default n
-	---help---
-	  This enables some cheesy code to save the last PM event point in the
-	  RTC across reboots, so that you can debug a machine that just hangs
-	  during suspend (or more commonly, during resume).
-
-	  To use this debugging feature you should attempt to suspend the
-	  machine, reboot it and then run
-
-	  	dmesg -s 1000000 | grep 'hash matches'
-
-	  CAUTION: this option will cause your machine's real-time clock to be
-	  set to an invalid time after a resume.
-
-config PM_SLEEP_SMP
-	bool
-	depends on SMP
-	depends on ARCH_SUSPEND_POSSIBLE || ARCH_HIBERNATION_POSSIBLE
-	depends on PM_SLEEP
-	select HOTPLUG_CPU
-	default y
-
-config PM_SLEEP
-	bool
-	depends on SUSPEND || HIBERNATION || XEN_SAVE_RESTORE
-	default y
-
-config PM_SLEEP_ADVANCED_DEBUG
-	bool
-	depends on PM_ADVANCED_DEBUG
-	default n
-
-config SUSPEND_NVS
-	bool
-
 config SUSPEND
 	bool "Suspend to RAM and standby"
-	depends on PM && ARCH_SUSPEND_POSSIBLE
-	select SUSPEND_NVS if HAS_IOMEM
+	depends on ARCH_SUSPEND_POSSIBLE
 	default y
 	---help---
 	  Allow the system to enter sleep states in which main memory is
 	  powered and thus its contents are preserved, such as the
 	  suspend-to-RAM state (e.g. the ACPI S3 state).
 
-config PM_TEST_SUSPEND
-	bool "Test suspend/resume and wakealarm during bootup"
-	depends on SUSPEND && PM_DEBUG && RTC_CLASS=y
-	---help---
-	  This option will let you suspend your machine during bootup, and
-	  make it wake up a few seconds later using an RTC wakeup alarm.
-	  Enable this with a kernel parameter like "test_suspend=mem".
-
-	  You probably want to have your system's RTC driver statically
-	  linked, ensuring that it's available when this test runs.
-
 config SUSPEND_FREEZER
 	bool "Enable freezer for suspend to RAM/standby" \
 	if ARCH_WANTS_FREEZER_CONTROL || BROKEN
@@ -134,10 +18,15 @@ config SUSPEND_FREEZER
 
 	  Turning OFF this setting is NOT recommended! If in doubt, say Y.
 
+config HIBERNATE_CALLBACKS
+	bool
+
 config HIBERNATION
 	bool "Hibernation (aka 'suspend to disk')"
-	depends on PM && SWAP && ARCH_HIBERNATION_POSSIBLE
-	select SUSPEND_NVS if HAS_IOMEM
+	depends on SWAP && ARCH_HIBERNATION_POSSIBLE
+	select HIBERNATE_CALLBACKS
+	select LZO_COMPRESS
+	select LZO_DECOMPRESS
 	---help---
 	  Enable the suspend to disk (STD) functionality, which is usually
 	  called "hibernation" in user interfaces. STD checkpoints the
@@ -198,6 +87,100 @@ config PM_STD_PARTITION
 	  suspended image to. It will simply pick the first available swap
 	  device.
 
+config PM_SLEEP
+	def_bool y
+	depends on SUSPEND || HIBERNATE_CALLBACKS
+
+config PM_SLEEP_SMP
+	def_bool y
+	depends on SMP
+	depends on ARCH_SUSPEND_POSSIBLE || ARCH_HIBERNATION_POSSIBLE
+	depends on PM_SLEEP
+	select HOTPLUG
+	select HOTPLUG_CPU
+
+config PM_RUNTIME
+	bool "Run-time PM core functionality"
+	depends on !IA64_HP_SIM
+	---help---
+	  Enable functionality allowing I/O devices to be put into energy-saving
+	  (low power) states at run time (or autosuspended) after a specified
+	  period of inactivity and woken up in response to a hardware-generated
+	  wake-up event or a driver's request.
+
+	  Hardware support is generally required for this functionality to work
+	  and the bus type drivers of the buses the devices are on are
+	  responsible for the actual handling of the autosuspend requests and
+	  wake-up events.
+
+config PM
+	def_bool y
+	depends on PM_SLEEP || PM_RUNTIME
+
+config PM_DEBUG
+	bool "Power Management Debug Support"
+	depends on PM
+	---help---
+	  This option enables various debugging support in the Power Management
+	  code. This is helpful when debugging and reporting PM bugs, like
+	  suspend support.
+
+config PM_ADVANCED_DEBUG
+	bool "Extra PM attributes in sysfs for low-level debugging/testing"
+	depends on PM_DEBUG
+	---help---
+	  Add extra sysfs attributes allowing one to access some Power Management
+	  fields of device objects from user space. If you are not a kernel
+	  developer interested in debugging/testing Power Management, say "no".
+
+config PM_TEST_SUSPEND
+	bool "Test suspend/resume and wakealarm during bootup"
+	depends on SUSPEND && PM_DEBUG && RTC_CLASS=y
+	---help---
+	  This option will let you suspend your machine during bootup, and
+	  make it wake up a few seconds later using an RTC wakeup alarm.
+	  Enable this with a kernel parameter like "test_suspend=mem".
+
+	  You probably want to have your system's RTC driver statically
+	  linked, ensuring that it's available when this test runs.
+
+config CAN_PM_TRACE
+	def_bool y
+	depends on PM_DEBUG && PM_SLEEP
+
+config PM_TRACE
+	bool
+	help
+	  This enables code to save the last PM event point across
+	  reboot. The architecture needs to support this, x86 for
+	  example does by saving things in the RTC, see below.
+
+	  The architecture specific code must provide the extern
+	  functions from <linux/resume-trace.h> as well as the
+	  <asm/resume-trace.h> header with a TRACE_RESUME() macro.
+
+	  The way the information is presented is architecture-
+	  dependent, x86 will print the information during a
+	  late_initcall.
+
+config PM_TRACE_RTC
+	bool "Suspend/resume event tracing"
+	depends on CAN_PM_TRACE
+	depends on X86
+	select PM_TRACE
+	---help---
+	  This enables some cheesy code to save the last PM event point in the
+	  RTC across reboots, so that you can debug a machine that just hangs
+	  during suspend (or more commonly, during resume).
+
+	  To use this debugging feature you should attempt to suspend the
+	  machine, reboot it and then run
+
+	  	dmesg -s 1000000 | grep 'hash matches'
+
+	  CAUTION: this option will cause your machine's real-time clock to be
+	  set to an invalid time after a resume.
+
 config APM_EMULATION
 	tristate "Advanced Power Management Emulation"
 	depends on PM && SYS_SUPPORTS_APM_EMULATION
@@ -224,21 +207,23 @@ config APM_EMULATION
 	  anything, try disabling/enabling this option (or disabling/enabling
 	  APM in your BIOS).
 
-config PM_RUNTIME
-	bool "Run-time PM core functionality"
-	depends on PM
+config ARCH_HAS_OPP
+	bool
+
+config PM_OPP
+	bool "Operating Performance Point (OPP) Layer library"
+	depends on ARCH_HAS_OPP
 	---help---
-	  Enable functionality allowing I/O devices to be put into energy-saving
-	  (low power) states at run time (or autosuspended) after a specified
-	  period of inactivity and woken up in response to a hardware-generated
-	  wake-up event or a driver's request.
+	  SOCs have a standard set of tuples consisting of frequency and
+	  voltage pairs that the device will support per voltage domain. This
+	  is called Operating Performance Point or OPP. The actual definitions
+	  of OPP varies over silicon within the same family of devices.
 
-	  Hardware support is generally required for this functionality to work
-	  and the bus type drivers of the buses the devices are on are
-	  responsible for the actual handling of the autosuspend requests and
-	  wake-up events.
+	  OPP layer organizes the data internally using device pointers
+	  representing individual voltage domains and provides SOC
+	  implementations a ready to use framework to manage OPPs.
+	  For more information, read <file:Documentation/power/opp.txt>
 
-config PM_OPS
-	bool
-	depends on PM_SLEEP || PM_RUNTIME
-	default y
+config PM_RUNTIME_CLK
+	def_bool y
+	depends on PM_RUNTIME && HAVE_CLK
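The Kconfig rework above inverts the old dependency chain: PM is no longer a user-visible prompt that everything else depends on, but a hidden def_bool that switches on whenever PM_SLEEP or PM_RUNTIME does. A minimal sketch, not part of this patch, of how driver code typically keys off the reworked symbols; all foo_* names are hypothetical:

#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
/* system sleep (suspend/hibernate) callbacks, compiled out otherwise */
static int foo_suspend(struct device *dev) { return 0; }
static int foo_resume(struct device *dev) { return 0; }
#endif

#ifdef CONFIG_PM_RUNTIME
/* runtime PM callbacks, compiled out otherwise */
static int foo_runtime_suspend(struct device *dev) { return 0; }
static int foo_runtime_resume(struct device *dev) { return 0; }
#endif

static const struct dev_pm_ops foo_pm_ops = {
#ifdef CONFIG_PM_SLEEP
	.suspend = foo_suspend,
	.resume = foo_resume,
#endif
#ifdef CONFIG_PM_RUNTIME
	.runtime_suspend = foo_runtime_suspend,
	.runtime_resume = foo_runtime_resume,
#endif
};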
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index f9063c6b185d..c5ebc6a90643 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -1,7 +1,5 @@
 
-ifeq ($(CONFIG_PM_DEBUG),y)
-EXTRA_CFLAGS	+=	-DDEBUG
-endif
+ccflags-$(CONFIG_PM_DEBUG)	:= -DDEBUG
 
 obj-$(CONFIG_PM)		+= main.o
 obj-$(CONFIG_PM_SLEEP)		+= console.o
@@ -10,6 +8,5 @@ obj-$(CONFIG_SUSPEND)		+= suspend.o
 obj-$(CONFIG_PM_TEST_SUSPEND)	+= suspend_test.o
 obj-$(CONFIG_HIBERNATION)	+= hibernate.o snapshot.o swap.o user.o \
 				   block_io.o
-obj-$(CONFIG_SUSPEND_NVS)	+= nvs.o
 
 obj-$(CONFIG_MAGIC_SYSRQ)	+= poweroff.o
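The ccflags-$(CONFIG_PM_DEBUG) := -DDEBUG idiom replaces the old ifeq block and defines DEBUG for every object built in this directory. What that buys, sketched on a hypothetical code path: with DEBUG defined (and dynamic debug disabled), pr_debug() expands to a real printk() at KERN_DEBUG level instead of compiling to nothing.

#include <linux/kernel.h>

static void pm_debug_example(void)
{
	/* a no-op unless built with -DDEBUG or CONFIG_DYNAMIC_DEBUG */
	pr_debug("PM: entering example code path\n");
}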
diff --git a/kernel/power/block_io.c b/kernel/power/block_io.c
index 83bbc7c02df9..d09dd10c5a5e 100644
--- a/kernel/power/block_io.c
+++ b/kernel/power/block_io.c
@@ -28,7 +28,7 @@
 static int submit(int rw, struct block_device *bdev, sector_t sector,
 		struct page *page, struct bio **bio_chain)
 {
-	const int bio_rw = rw | REQ_SYNC | REQ_UNPLUG;
+	const int bio_rw = rw | REQ_SYNC;
 	struct bio *bio;
 
 	bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
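REQ_UNPLUG can be dropped here because the block layer in this kernel series replaced explicit per-request unplugging with on-stack plugging. A hedged sketch of that model, with submit_batch() a hypothetical caller: bios submitted between blk_start_plug() and blk_finish_plug() are batched, and the queue is flushed when the plug is finished or the task sleeps.

#include <linux/blkdev.h>
#include <linux/bio.h>

static void submit_batch(struct bio **bios, int n)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < n; i++)
		submit_bio(READ, bios[i]);	/* queued behind the plug */
	blk_finish_plug(&plug);			/* flushes the batch */
}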
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 8dc31e02ae12..8f7b1db1ece1 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -23,12 +23,13 @@
 #include <linux/cpu.h>
 #include <linux/freezer.h>
 #include <linux/gfp.h>
+#include <linux/syscore_ops.h>
 #include <scsi/scsi_scan.h>
-#include <asm/suspend.h>
 
 #include "power.h"
 
 
+static int nocompress = 0;
 static int noresume = 0;
 static char resume_file[256] = CONFIG_PM_STD_PARTITION;
 dev_t swsusp_resume_device;
@@ -50,18 +51,17 @@ enum {
 
 static int hibernation_mode = HIBERNATION_SHUTDOWN;
 
-static struct platform_hibernation_ops *hibernation_ops;
+static const struct platform_hibernation_ops *hibernation_ops;
 
 /**
- * hibernation_set_ops - set the global hibernate operations
- * @ops: the hibernation operations to use in subsequent hibernation transitions
+ * hibernation_set_ops - Set the global hibernate operations.
+ * @ops: Hibernation operations to use in subsequent hibernation transitions.
  */
-
-void hibernation_set_ops(struct platform_hibernation_ops *ops)
+void hibernation_set_ops(const struct platform_hibernation_ops *ops)
 {
 	if (ops && !(ops->begin && ops->end && ops->pre_snapshot
 	    && ops->prepare && ops->finish && ops->enter && ops->pre_restore
-	    && ops->restore_cleanup)) {
+	    && ops->restore_cleanup && ops->leave)) {
 		WARN_ON(1);
 		return;
 	}
@@ -113,10 +113,9 @@ static int hibernation_test(int level) { return 0; }
 #endif /* !CONFIG_PM_DEBUG */
 
 /**
- * platform_begin - tell the platform driver that we're starting
- * hibernation
+ * platform_begin - Call platform to start hibernation.
+ * @platform_mode: Whether or not to use the platform driver.
  */
-
 static int platform_begin(int platform_mode)
 {
 	return (platform_mode && hibernation_ops) ?
@@ -124,10 +123,9 @@ static int platform_begin(int platform_mode)
 }
 
 /**
- * platform_end - tell the platform driver that we've entered the
- * working state
+ * platform_end - Call platform to finish transition to the working state.
+ * @platform_mode: Whether or not to use the platform driver.
  */
-
 static void platform_end(int platform_mode)
 {
 	if (platform_mode && hibernation_ops)
@@ -135,8 +133,11 @@ static void platform_end(int platform_mode)
 }
 
 /**
- * platform_pre_snapshot - prepare the machine for hibernation using the
- * platform driver if so configured and return an error code if it fails
+ * platform_pre_snapshot - Call platform to prepare the machine for hibernation.
+ * @platform_mode: Whether or not to use the platform driver.
+ *
+ * Use the platform driver to prepare the system for creating a hibernate image,
+ * if so configured, and return an error code if that fails.
  */
 
 static int platform_pre_snapshot(int platform_mode)
@@ -146,10 +147,14 @@ static int platform_pre_snapshot(int platform_mode)
 }
 
 /**
- * platform_leave - prepare the machine for switching to the normal mode
- * of operation using the platform driver (called with interrupts disabled)
+ * platform_leave - Call platform to prepare a transition to the working state.
+ * @platform_mode: Whether or not to use the platform driver.
+ *
+ * Use the platform driver prepare to prepare the machine for switching to the
+ * normal mode of operation.
+ *
+ * This routine is called on one CPU with interrupts disabled.
  */
-
 static void platform_leave(int platform_mode)
 {
 	if (platform_mode && hibernation_ops)
@@ -157,10 +162,14 @@ static void platform_leave(int platform_mode)
 }
 
 /**
- * platform_finish - switch the machine to the normal mode of operation
- * using the platform driver (must be called after platform_prepare())
+ * platform_finish - Call platform to switch the system to the working state.
+ * @platform_mode: Whether or not to use the platform driver.
+ *
+ * Use the platform driver to switch the machine to the normal mode of
+ * operation.
+ *
+ * This routine must be called after platform_prepare().
  */
-
 static void platform_finish(int platform_mode)
 {
 	if (platform_mode && hibernation_ops)
@@ -168,11 +177,15 @@ static void platform_finish(int platform_mode)
 }
 
 /**
- * platform_pre_restore - prepare the platform for the restoration from a
- * hibernation image. If the restore fails after this function has been
- * called, platform_restore_cleanup() must be called.
+ * platform_pre_restore - Prepare for hibernate image restoration.
+ * @platform_mode: Whether or not to use the platform driver.
+ *
+ * Use the platform driver to prepare the system for resume from a hibernation
+ * image.
+ *
+ * If the restore fails after this function has been called,
+ * platform_restore_cleanup() must be called.
  */
-
 static int platform_pre_restore(int platform_mode)
 {
 	return (platform_mode && hibernation_ops) ?
@@ -180,12 +193,16 @@ static int platform_pre_restore(int platform_mode)
 }
 
 /**
- * platform_restore_cleanup - switch the platform to the normal mode of
- * operation after a failing restore. If platform_pre_restore() has been
- * called before the failing restore, this function must be called too,
- * regardless of the result of platform_pre_restore().
+ * platform_restore_cleanup - Switch to the working state after failing restore.
+ * @platform_mode: Whether or not to use the platform driver.
+ *
+ * Use the platform driver to switch the system to the normal mode of operation
+ * after a failing restore.
+ *
+ * If platform_pre_restore() has been called before the failing restore, this
+ * function must be called too, regardless of the result of
+ * platform_pre_restore().
  */
-
 static void platform_restore_cleanup(int platform_mode)
 {
 	if (platform_mode && hibernation_ops)
@@ -193,10 +210,9 @@ static void platform_restore_cleanup(int platform_mode)
 }
 
 /**
- * platform_recover - recover the platform from a failure to suspend
- * devices.
+ * platform_recover - Recover from a failure to suspend devices.
+ * @platform_mode: Whether or not to use the platform driver.
  */
-
 static void platform_recover(int platform_mode)
 {
 	if (platform_mode && hibernation_ops && hibernation_ops->recover)
@@ -204,13 +220,12 @@ static void platform_recover(int platform_mode)
 }
 
 /**
- * swsusp_show_speed - print the time elapsed between two events.
+ * swsusp_show_speed - Print time elapsed between two events during hibernation.
  * @start: Starting event.
  * @stop: Final event.
- * @nr_pages - number of pages processed between @start and @stop
- * @msg - introductory message to print
+ * @nr_pages: Number of memory pages processed between @start and @stop.
+ * @msg: Additional diagnostic message to print.
  */
-
 void swsusp_show_speed(struct timeval *start, struct timeval *stop,
 			unsigned nr_pages, char *msg)
 {
@@ -233,25 +248,18 @@ void swsusp_show_speed(struct timeval *start, struct timeval *stop,
 }
 
 /**
- * create_image - freeze devices that need to be frozen with interrupts
- * off, create the hibernation image and thaw those devices. Control
- * reappears in this routine after a restore.
+ * create_image - Create a hibernation image.
+ * @platform_mode: Whether or not to use the platform driver.
+ *
+ * Execute device drivers' .freeze_noirq() callbacks, create a hibernation image
+ * and execute the drivers' .thaw_noirq() callbacks.
+ *
+ * Control reappears in this routine after the subsequent restore.
  */
-
 static int create_image(int platform_mode)
 {
 	int error;
 
-	error = arch_prepare_suspend();
-	if (error)
-		return error;
-
-	/* At this point, dpm_suspend_start() has been called, but *not*
-	 * dpm_suspend_noirq(). We *must* call dpm_suspend_noirq() now.
-	 * Otherwise, drivers for some devices (e.g. interrupt controllers)
-	 * become desynchronized with the actual state of the hardware
-	 * at resume time, and evil weirdness ensues.
-	 */
 	error = dpm_suspend_noirq(PMSG_FREEZE);
 	if (error) {
 		printk(KERN_ERR "PM: Some devices failed to power down, "
@@ -270,14 +278,14 @@ static int create_image(int platform_mode)
 
 	local_irq_disable();
 
-	error = sysdev_suspend(PMSG_FREEZE);
+	error = syscore_suspend();
 	if (error) {
 		printk(KERN_ERR "PM: Some system devices failed to power down, "
 			"aborting hibernation\n");
 		goto Enable_irqs;
 	}
 
-	if (hibernation_test(TEST_CORE) || !pm_check_wakeup_events())
+	if (hibernation_test(TEST_CORE) || pm_wakeup_pending())
 		goto Power_up;
 
 	in_suspend = 1;
@@ -294,10 +302,7 @@ static int create_image(int platform_mode)
 	}
 
  Power_up:
-	sysdev_resume();
-	/* NOTE: dpm_resume_noirq() is just a resume() for devices
-	 * that suspended with irqs off ... no overall powerup.
-	 */
+	syscore_resume();
 
  Enable_irqs:
 	local_irq_enable();
@@ -315,31 +320,32 @@ static int create_image(int platform_mode)
 }
 
 /**
- * hibernation_snapshot - quiesce devices and create the hibernation
- * snapshot image.
- * @platform_mode - if set, use the platform driver, if available, to
- *	prepare the platform firmware for the power transition.
+ * hibernation_snapshot - Quiesce devices and create a hibernation image.
+ * @platform_mode: If set, use platform driver to prepare for the transition.
  *
- * Must be called with pm_mutex held
+ * This routine must be called with pm_mutex held.
  */
-
 int hibernation_snapshot(int platform_mode)
 {
+	pm_message_t msg = PMSG_RECOVER;
 	int error;
-	gfp_t saved_mask;
 
 	error = platform_begin(platform_mode);
 	if (error)
 		goto Close;
 
+	error = dpm_prepare(PMSG_FREEZE);
+	if (error)
+		goto Complete_devices;
+
 	/* Preallocate image memory before shutting down devices. */
 	error = hibernate_preallocate_memory();
 	if (error)
-		goto Close;
+		goto Complete_devices;
 
 	suspend_console();
-	saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
-	error = dpm_suspend_start(PMSG_FREEZE);
+	pm_restrict_gfp_mask();
+	error = dpm_suspend(PMSG_FREEZE);
 	if (error)
 		goto Recover_platform;
 
@@ -347,17 +353,27 @@ int hibernation_snapshot(int platform_mode)
 		goto Recover_platform;
 
 	error = create_image(platform_mode);
-	/* Control returns here after successful restore */
+	/*
+	 * Control returns here (1) after the image has been created or the
+	 * image creation has failed and (2) after a successful restore.
+	 */
 
  Resume_devices:
 	/* We may need to release the preallocated image pages here. */
 	if (error || !in_suspend)
 		swsusp_free();
 
-	dpm_resume_end(in_suspend ?
-		(error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
-	set_gfp_allowed_mask(saved_mask);
+	msg = in_suspend ? (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE;
+	dpm_resume(msg);
+
+	if (error || !in_suspend)
+		pm_restore_gfp_mask();
+
 	resume_console();
+
+ Complete_devices:
+	dpm_complete(msg);
+
  Close:
 	platform_end(platform_mode);
 	return error;
@@ -368,13 +384,14 @@ int hibernation_snapshot(int platform_mode)
 }
 
 /**
- * resume_target_kernel - prepare devices that need to be suspended with
- * interrupts off, restore the contents of highmem that have not been
- * restored yet from the image and run the low level code that will restore
- * the remaining contents of memory and switch to the just restored target
- * kernel.
+ * resume_target_kernel - Restore system state from a hibernation image.
+ * @platform_mode: Whether or not to use the platform driver.
+ *
+ * Execute device drivers' .freeze_noirq() callbacks, restore the contents of
+ * highmem that have not been restored yet from the image and run the low-level
+ * code that will restore the remaining contents of memory and switch to the
+ * just restored target kernel.
  */
-
 static int resume_target_kernel(bool platform_mode)
 {
 	int error;
@@ -396,34 +413,36 @@ static int resume_target_kernel(bool platform_mode)
 
 	local_irq_disable();
 
-	error = sysdev_suspend(PMSG_QUIESCE);
+	error = syscore_suspend();
 	if (error)
 		goto Enable_irqs;
 
-	/* We'll ignore saved state, but this gets preempt count (etc) right */
 	save_processor_state();
 	error = restore_highmem();
 	if (!error) {
 		error = swsusp_arch_resume();
 		/*
 		 * The code below is only ever reached in case of a failure.
-		 * Otherwise execution continues at place where
-		 * swsusp_arch_suspend() was called
+		 * Otherwise, execution continues at the place where
+		 * swsusp_arch_suspend() was called.
 		 */
 		BUG_ON(!error);
-		/* This call to restore_highmem() undos the previous one */
+		/*
+		 * This call to restore_highmem() reverts the changes made by
+		 * the previous one.
+		 */
 		restore_highmem();
 	}
 	/*
 	 * The only reason why swsusp_arch_resume() can fail is memory being
 	 * very tight, so we have to free it as soon as we can to avoid
-	 * subsequent failures
+	 * subsequent failures.
 	 */
 	swsusp_free();
 	restore_processor_state();
 	touch_softlockup_watchdog();
 
-	sysdev_resume();
+	syscore_resume();
 
  Enable_irqs:
 	local_irq_enable();
@@ -440,42 +459,36 @@ static int resume_target_kernel(bool platform_mode)
 }
 
 /**
- * hibernation_restore - quiesce devices and restore the hibernation
- * snapshot image. If successful, control returns in hibernation_snaphot()
- * @platform_mode - if set, use the platform driver, if available, to
- *	prepare the platform firmware for the transition.
+ * hibernation_restore - Quiesce devices and restore from a hibernation image.
+ * @platform_mode: If set, use platform driver to prepare for the transition.
  *
- * Must be called with pm_mutex held
+ * This routine must be called with pm_mutex held. If it is successful, control
+ * reappears in the restored target kernel in hibernation_snaphot().
  */
-
 int hibernation_restore(int platform_mode)
 {
 	int error;
-	gfp_t saved_mask;
 
 	pm_prepare_console();
 	suspend_console();
-	saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
+	pm_restrict_gfp_mask();
 	error = dpm_suspend_start(PMSG_QUIESCE);
 	if (!error) {
 		error = resume_target_kernel(platform_mode);
 		dpm_resume_end(PMSG_RECOVER);
 	}
-	set_gfp_allowed_mask(saved_mask);
+	pm_restore_gfp_mask();
 	resume_console();
 	pm_restore_console();
 	return error;
 }
 
 /**
- * hibernation_platform_enter - enter the hibernation state using the
- * platform driver (if available)
+ * hibernation_platform_enter - Power off the system using the platform driver.
  */
-
 int hibernation_platform_enter(void)
 {
 	int error;
-	gfp_t saved_mask;
 
 	if (!hibernation_ops)
 		return -ENOSYS;
@@ -491,7 +504,6 @@ int hibernation_platform_enter(void)
 
 	entering_platform_hibernation = true;
 	suspend_console();
-	saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
 	error = dpm_suspend_start(PMSG_HIBERNATE);
 	if (error) {
 		if (hibernation_ops->recover)
@@ -512,8 +524,8 @@ int hibernation_platform_enter(void)
 		goto Platform_finish;
 
 	local_irq_disable();
-	sysdev_suspend(PMSG_HIBERNATE);
-	if (!pm_check_wakeup_events()) {
+	syscore_suspend();
+	if (pm_wakeup_pending()) {
 		error = -EAGAIN;
 		goto Power_up;
 	}
@@ -523,7 +535,7 @@ int hibernation_platform_enter(void)
 	while (1);
 
  Power_up:
-	sysdev_resume();
+	syscore_resume();
 	local_irq_enable();
 	enable_nonboot_cpus();
 
@@ -535,7 +547,6 @@ int hibernation_platform_enter(void)
 
  Resume_devices:
 	entering_platform_hibernation = false;
 	dpm_resume_end(PMSG_RESTORE);
-	set_gfp_allowed_mask(saved_mask);
 	resume_console();
 
  Close:
@@ -545,12 +556,12 @@ int hibernation_platform_enter(void)
 }
 
 /**
  * power_down - Shut the machine down for hibernation.
  *
- * Use the platform driver, if configured so; otherwise try
- * to power off or reboot.
+ * Use the platform driver, if configured, to put the system into the sleep
+ * state corresponding to hibernation, or try to power it off or reboot,
+ * depending on the value of hibernation_mode.
  */
-
 static void power_down(void)
 {
 	switch (hibernation_mode) {
@@ -587,9 +598,8 @@ static int prepare_processes(void)
 }
 
 /**
- * hibernate - The granpappy of the built-in hibernation management
+ * hibernate - Carry out system hibernation, including saving the image.
  */
-
 int hibernate(void)
 {
 	int error;
@@ -638,11 +648,15 @@ int hibernate(void)
 
 		if (hibernation_mode == HIBERNATION_PLATFORM)
 			flags |= SF_PLATFORM_MODE;
+		if (nocompress)
+			flags |= SF_NOCOMPRESS_MODE;
 		pr_debug("PM: writing image.\n");
 		error = swsusp_write(flags);
 		swsusp_free();
 		if (!error)
 			power_down();
+		in_suspend = 0;
+		pm_restore_gfp_mask();
 	} else {
 		pr_debug("PM: Image restored successfully.\n");
 	}
@@ -663,17 +677,20 @@ int hibernate(void)
 
 
 /**
- * software_resume - Resume from a saved image.
+ * software_resume - Resume from a saved hibernation image.
+ *
+ * This routine is called as a late initcall, when all devices have been
+ * discovered and initialized already.
  *
- * Called as a late_initcall (so all devices are discovered and
- * initialized), we call swsusp to see if we have a saved image or not.
- * If so, we quiesce devices, the restore the saved image. We will
- * return above (in hibernate() ) if everything goes well.
- * Otherwise, we fail gracefully and return to the normally
- * scheduled program.
+ * The image reading code is called to see if there is a hibernation image
+ * available for reading. If that is the case, devices are quiesced and the
+ * contents of memory is restored from the saved image.
  *
+ * If this is successful, control reappears in the restored target kernel in
+ * hibernation_snaphot() which returns to hibernate(). Otherwise, the routine
+ * attempts to recover gracefully and make the kernel return to the normal mode
+ * of operation.
  */
-
 static int software_resume(void)
 {
 	int error;
@@ -705,7 +722,7 @@ static int software_resume(void)
 		goto Unlock;
 	}
 
-	pr_debug("PM: Checking image partition %s\n", resume_file);
+	pr_debug("PM: Checking hibernation image partition %s\n", resume_file);
 
 	/* Check if the device is there */
 	swsusp_resume_device = name_to_dev_t(resume_file);
@@ -730,10 +747,10 @@ static int software_resume(void)
 	}
 
  Check_image:
-	pr_debug("PM: Resume from partition %d:%d\n",
+	pr_debug("PM: Hibernation image partition %d:%d present\n",
 		MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device));
 
-	pr_debug("PM: Checking hibernation image.\n");
+	pr_debug("PM: Looking for hibernation image.\n");
 	error = swsusp_check();
 	if (error)
 		goto Unlock;
@@ -765,14 +782,14 @@ static int software_resume(void)
 		goto Done;
 	}
 
-	pr_debug("PM: Reading hibernation image.\n");
+	pr_debug("PM: Loading hibernation image.\n");
 
 	error = swsusp_read(&flags);
 	swsusp_close(FMODE_READ);
 	if (!error)
 		hibernation_restore(flags & SF_PLATFORM_MODE);
 
-	printk(KERN_ERR "PM: Restore failed, recovering.\n");
+	printk(KERN_ERR "PM: Failed to load hibernation image, recovering.\n");
 	swsusp_free();
 	thaw_processes();
  Done:
@@ -785,7 +802,7 @@ static int software_resume(void)
 	/* For success case, the suspend path will release the lock */
  Unlock:
 	mutex_unlock(&pm_mutex);
-	pr_debug("PM: Resume from disk failed.\n");
+	pr_debug("PM: Hibernation image not present or could not be loaded.\n");
 	return error;
 close_finish:
 	swsusp_close(FMODE_READ);
@@ -803,21 +820,17 @@ static const char * const hibernation_modes[] = {
 	[HIBERNATION_TESTPROC]	= "testproc",
 };
 
-/**
- * disk - Control hibernation mode
- *
- * Suspend-to-disk can be handled in several ways. We have a few options
- * for putting the system to sleep - using the platform driver (e.g. ACPI
- * or other hibernation_ops), powering off the system or rebooting the
- * system (for testing) as well as the two test modes.
+/*
+ * /sys/power/disk - Control hibernation mode.
  *
- * The system can support 'platform', and that is known a priori (and
- * encoded by the presence of hibernation_ops). However, the user may
- * choose 'shutdown' or 'reboot' as alternatives, as well as one fo the
- * test modes, 'test' or 'testproc'.
+ * Hibernation can be handled in several ways. There are a few different ways
+ * to put the system into the sleep state: using the platform driver (e.g. ACPI
+ * or other hibernation_ops), powering it off or rebooting it (for testing
+ * mostly), or using one of the two available test modes.
  *
- * show() will display what the mode is currently set to.
- * store() will accept one of
+ * The sysfs file /sys/power/disk provides an interface for selecting the
+ * hibernation mode to use. Reading from this file causes the available modes
+ * to be printed. There are 5 modes that can be supported:
  *
  * 'platform'
 * 'shutdown'
@@ -825,8 +838,14 @@ static const char * const hibernation_modes[] = {
 * 'test'
 * 'testproc'
 *
- * It will only change to 'platform' if the system
- * supports it (as determined by having hibernation_ops).
+ * If a platform hibernation driver is in use, 'platform' will be supported
+ * and will be used by default. Otherwise, 'shutdown' will be used by default.
+ * The selected option (i.e. the one corresponding to the current value of
+ * hibernation_mode) is enclosed by a square bracket.
+ *
+ * To select a given hibernation mode it is necessary to write the mode's
+ * string representation (as returned by reading from /sys/power/disk) back
+ * into /sys/power/disk.
  */
-
 static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr,
@@ -859,7 +878,6 @@ static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr,
 	return buf-start;
 }
 
-
 static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
 			  const char *buf, size_t n)
 {
@@ -961,10 +979,33 @@ static ssize_t image_size_store(struct kobject *kobj, struct kobj_attribute *att
 
 power_attr(image_size);
 
+static ssize_t reserved_size_show(struct kobject *kobj,
+				  struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%lu\n", reserved_size);
+}
+
+static ssize_t reserved_size_store(struct kobject *kobj,
+				   struct kobj_attribute *attr,
+				   const char *buf, size_t n)
+{
+	unsigned long size;
+
+	if (sscanf(buf, "%lu", &size) == 1) {
+		reserved_size = size;
+		return n;
+	}
+
+	return -EINVAL;
+}
+
+power_attr(reserved_size);
+
 static struct attribute * g[] = {
 	&disk_attr.attr,
 	&resume_attr.attr,
 	&image_size_attr.attr,
+	&reserved_size_attr.attr,
 	NULL,
 };
 
@@ -1004,6 +1045,15 @@ static int __init resume_offset_setup(char *str)
 	return 1;
 }
 
+static int __init hibernate_setup(char *str)
+{
+	if (!strncmp(str, "noresume", 8))
+		noresume = 1;
+	else if (!strncmp(str, "nocompress", 10))
+		nocompress = 1;
+	return 1;
+}
+
 static int __init noresume_setup(char *str)
 {
 	noresume = 1;
@@ -1013,3 +1063,4 @@ static int __init noresume_setup(char *str)
 __setup("noresume", noresume_setup);
 __setup("resume_offset=", resume_offset_setup);
 __setup("resume=", resume_setup);
+__setup("hibernate=", hibernate_setup);
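Two interface points in the hibernate.c changes are worth spelling out. First, hibernation_set_ops() now takes a const pointer and rejects an ops structure that lacks a ->leave callback. A minimal registration sketch under the new signature; every my_* name is a hypothetical placeholder (in practice the ACPI platform code is the caller):

#include <linux/suspend.h>

static int my_begin(void) { return 0; }
static void my_end(void) { }
static int my_pre_snapshot(void) { return 0; }
static int my_prepare(void) { return 0; }
static void my_finish(void) { }
static int my_enter(void) { return 0; }
static void my_leave(void) { }
static int my_pre_restore(void) { return 0; }
static void my_restore_cleanup(void) { }

static const struct platform_hibernation_ops my_hibernation_ops = {
	.begin = my_begin,
	.end = my_end,
	.pre_snapshot = my_pre_snapshot,
	.prepare = my_prepare,
	.finish = my_finish,
	.enter = my_enter,
	.leave = my_leave,		/* now mandatory */
	.pre_restore = my_pre_restore,
	.restore_cleanup = my_restore_cleanup,
};

static int __init my_platform_init(void)
{
	/* accepted: every callback checked by hibernation_set_ops() is set */
	hibernation_set_ops(&my_hibernation_ops);
	return 0;
}

Second, the new hibernate= boot parameter: booting with hibernate=nocompress sets SF_NOCOMPRESS_MODE in the image header so the image is written without LZO compression, and hibernate=noresume behaves like the existing noresume option.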
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 62b0bc6e4983..2981af4ce7cb 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -17,9 +17,6 @@
 
 DEFINE_MUTEX(pm_mutex);
 
-unsigned int pm_flags;
-EXPORT_SYMBOL(pm_flags);
-
 #ifdef CONFIG_PM_SLEEP
 
 /* Routines for PM-transition notifications */
@@ -227,7 +224,7 @@ power_attr(state);
  * writing to 'state'. It first should read from 'wakeup_count' and store
  * the read value. Then, after carrying out its own preparations for the system
  * transition to a sleep state, it should write the stored value to
- * 'wakeup_count'. If that fails, at least one wakeup event has occured since
+ * 'wakeup_count'. If that fails, at least one wakeup event has occurred since
  * 'wakeup_count' was read and 'state' should not be written to. Otherwise, it
 * is allowed to write to 'state', but the transition will be aborted if there
 * are any wakeup events detected after 'wakeup_count' was written to.
@@ -237,18 +234,18 @@ static ssize_t wakeup_count_show(struct kobject *kobj,
 				struct kobj_attribute *attr,
 				char *buf)
 {
-	unsigned long val;
+	unsigned int val;
 
-	return pm_get_wakeup_count(&val) ? sprintf(buf, "%lu\n", val) : -EINTR;
+	return pm_get_wakeup_count(&val) ? sprintf(buf, "%u\n", val) : -EINTR;
 }
 
 static ssize_t wakeup_count_store(struct kobject *kobj,
 				struct kobj_attribute *attr,
 				const char *buf, size_t n)
 {
-	unsigned long val;
+	unsigned int val;
 
-	if (sscanf(buf, "%lu", &val) == 1) {
+	if (sscanf(buf, "%u", &val) == 1) {
 		if (pm_save_wakeup_count(val))
 			return n;
 	}
@@ -281,12 +278,30 @@ pm_trace_store(struct kobject *kobj, struct kobj_attribute *attr,
 }
 
 power_attr(pm_trace);
+
+static ssize_t pm_trace_dev_match_show(struct kobject *kobj,
+				       struct kobj_attribute *attr,
+				       char *buf)
+{
+	return show_trace_dev_match(buf, PAGE_SIZE);
+}
+
+static ssize_t
+pm_trace_dev_match_store(struct kobject *kobj, struct kobj_attribute *attr,
+			 const char *buf, size_t n)
+{
+	return -EINVAL;
+}
+
+power_attr(pm_trace_dev_match);
+
 #endif /* CONFIG_PM_TRACE */
 
 static struct attribute * g[] = {
 	&state_attr.attr,
 #ifdef CONFIG_PM_TRACE
 	&pm_trace_attr.attr,
+	&pm_trace_dev_match_attr.attr,
 #endif
 #ifdef CONFIG_PM_SLEEP
 	&pm_async_attr.attr,
@@ -308,7 +323,7 @@ EXPORT_SYMBOL_GPL(pm_wq);
 
 static int __init pm_start_workqueue(void)
 {
-	pm_wq = create_freezeable_workqueue("pm");
+	pm_wq = alloc_workqueue("pm", WQ_FREEZABLE, 0);
 
 	return pm_wq ? 0 : -ENOMEM;
 }
@@ -321,6 +336,8 @@ static int __init pm_init(void)
 	int error = pm_start_workqueue();
 	if (error)
 		return error;
+	hibernate_image_size_init();
+	hibernate_reserved_size_init();
 	power_kobj = kobject_create_and_add("power", NULL);
 	if (!power_kobj)
 		return -ENOMEM;
diff --git a/kernel/power/nvs.c b/kernel/power/nvs.c
deleted file mode 100644
index 1836db60bbb6..000000000000
--- a/kernel/power/nvs.c
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * linux/kernel/power/hibernate_nvs.c - Routines for handling NVS memory
- *
- * Copyright (C) 2008,2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
- *
- * This file is released under the GPLv2.
- */
-
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/suspend.h>
-
-/*
- * Platforms, like ACPI, may want us to save some memory used by them during
- * suspend and to restore the contents of this memory during the subsequent
- * resume. The code below implements a mechanism allowing us to do that.
- */
-
-struct nvs_page {
-	unsigned long phys_start;
-	unsigned int size;
-	void *kaddr;
-	void *data;
-	struct list_head node;
-};
-
-static LIST_HEAD(nvs_list);
-
-/**
- * suspend_nvs_register - register platform NVS memory region to save
- * @start - physical address of the region
- * @size - size of the region
- *
- * The NVS region need not be page-aligned (both ends) and we arrange
- * things so that the data from page-aligned addresses in this region will
- * be copied into separate RAM pages.
- */
-int suspend_nvs_register(unsigned long start, unsigned long size)
-{
-	struct nvs_page *entry, *next;
-
-	while (size > 0) {
-		unsigned int nr_bytes;
-
-		entry = kzalloc(sizeof(struct nvs_page), GFP_KERNEL);
-		if (!entry)
-			goto Error;
-
-		list_add_tail(&entry->node, &nvs_list);
-		entry->phys_start = start;
-		nr_bytes = PAGE_SIZE - (start & ~PAGE_MASK);
-		entry->size = (size < nr_bytes) ? size : nr_bytes;
-
-		start += entry->size;
-		size -= entry->size;
-	}
-	return 0;
-
- Error:
-	list_for_each_entry_safe(entry, next, &nvs_list, node) {
-		list_del(&entry->node);
-		kfree(entry);
-	}
-	return -ENOMEM;
-}
-
-/**
- * suspend_nvs_free - free data pages allocated for saving NVS regions
- */
-void suspend_nvs_free(void)
-{
-	struct nvs_page *entry;
-
-	list_for_each_entry(entry, &nvs_list, node)
-		if (entry->data) {
-			free_page((unsigned long)entry->data);
-			entry->data = NULL;
-			if (entry->kaddr) {
-				iounmap(entry->kaddr);
-				entry->kaddr = NULL;
-			}
-		}
-}
-
-/**
- * suspend_nvs_alloc - allocate memory necessary for saving NVS regions
- */
-int suspend_nvs_alloc(void)
-{
-	struct nvs_page *entry;
-
-	list_for_each_entry(entry, &nvs_list, node) {
-		entry->data = (void *)__get_free_page(GFP_KERNEL);
-		if (!entry->data) {
-			suspend_nvs_free();
-			return -ENOMEM;
-		}
-	}
-	return 0;
-}
-
-/**
- * suspend_nvs_save - save NVS memory regions
- */
-void suspend_nvs_save(void)
-{
-	struct nvs_page *entry;
-
-	printk(KERN_INFO "PM: Saving platform NVS memory\n");
-
-	list_for_each_entry(entry, &nvs_list, node)
-		if (entry->data) {
-			entry->kaddr = ioremap(entry->phys_start, entry->size);
-			memcpy(entry->data, entry->kaddr, entry->size);
-		}
-}
-
-/**
- * suspend_nvs_restore - restore NVS memory regions
- *
- * This function is going to be called with interrupts disabled, so it
- * cannot iounmap the virtual addresses used to access the NVS region.
- */
-void suspend_nvs_restore(void)
-{
-	struct nvs_page *entry;
-
-	printk(KERN_INFO "PM: Restoring platform NVS memory\n");
-
-	list_for_each_entry(entry, &nvs_list, node)
-		if (entry->data)
-			memcpy(entry->kaddr, entry->data, entry->size);
-}
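The NVS helpers deleted above do not disappear from the kernel: in the upstream series this merge brings in, they were moved out of kernel/power and next to their only user, the ACPI code, which is also why CONFIG_SUSPEND_NVS vanishes from the Kconfig and Makefile. The call side is unchanged by the move; a hedged sketch with an invented address and size:

#include <linux/suspend.h>

static int __init register_firmware_nvs(void)
{
	/* e.g. an "ACPI NVS" range reported by firmware (values invented) */
	return suspend_nvs_register(0x7f000000, 0x10000);
}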
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 006270fe382d..9a00a0a26280 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -14,6 +14,10 @@ struct swsusp_info {
 } __attribute__((aligned(PAGE_SIZE)));
 
 #ifdef CONFIG_HIBERNATION
+/* kernel/power/snapshot.c */
+extern void __init hibernate_reserved_size_init(void);
+extern void __init hibernate_image_size_init(void);
+
 #ifdef CONFIG_ARCH_HIBERNATION_HEADER
 /* Maximum size of architecture specific data in a hibernation header */
 #define MAX_ARCH_HEADER_SIZE	(sizeof(struct new_utsname) + 4)
@@ -49,7 +53,12 @@ static inline char *check_image_kernel(struct swsusp_info *info)
 extern int hibernation_snapshot(int platform_mode);
 extern int hibernation_restore(int platform_mode);
 extern int hibernation_platform_enter(void);
-#endif
+
+#else /* !CONFIG_HIBERNATION */
+
+static inline void hibernate_reserved_size_init(void) {}
+static inline void hibernate_image_size_init(void) {}
+#endif /* !CONFIG_HIBERNATION */
 
 extern int pfn_is_nosave(unsigned long);
 
@@ -65,6 +74,8 @@ static struct kobj_attribute _name##_attr = {	\
 
 /* Preferred image size in bytes (default 500 MB) */
 extern unsigned long image_size;
+/* Size of memory reserved for drivers (default SPARE_PAGES x PAGE_SIZE) */
+extern unsigned long reserved_size;
 extern int in_suspend;
 extern dev_t swsusp_resume_device;
 extern sector_t swsusp_resume_block;
@@ -134,6 +145,7 @@ extern int swsusp_swap_in_use(void);
  *	the image header.
  */
 #define SF_PLATFORM_MODE	1
+#define SF_NOCOMPRESS_MODE	2
 
 /* kernel/power/hibernate.c */
 extern int swsusp_check(void);
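SF_NOCOMPRESS_MODE joins SF_PLATFORM_MODE as a bit in the swsusp image header flags, which is how the resuming kernel learns properties of the image it is about to read. A small sketch of how such flags are consumed; both helpers are hypothetical, not part of this patch:

static inline bool image_is_compressed(unsigned int flags)
{
	return !(flags & SF_NOCOMPRESS_MODE);	/* skip LZO when the bit is set */
}

static inline bool image_used_platform_mode(unsigned int flags)
{
	return flags & SF_PLATFORM_MODE;
}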
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 028a99598f49..0cf3a27a6c9d 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -22,7 +22,7 @@
22 */ 22 */
23#define TIMEOUT (20 * HZ) 23#define TIMEOUT (20 * HZ)
24 24
25static inline int freezeable(struct task_struct * p) 25static inline int freezable(struct task_struct * p)
26{ 26{
27 if ((p == current) || 27 if ((p == current) ||
28 (p->flags & PF_NOFREEZE) || 28 (p->flags & PF_NOFREEZE) ||
@@ -40,6 +40,7 @@ static int try_to_freeze_tasks(bool sig_only)
40 struct timeval start, end; 40 struct timeval start, end;
41 u64 elapsed_csecs64; 41 u64 elapsed_csecs64;
42 unsigned int elapsed_csecs; 42 unsigned int elapsed_csecs;
43 bool wakeup = false;
43 44
44 do_gettimeofday(&start); 45 do_gettimeofday(&start);
45 46
@@ -52,7 +53,7 @@ static int try_to_freeze_tasks(bool sig_only)
52 todo = 0; 53 todo = 0;
53 read_lock(&tasklist_lock); 54 read_lock(&tasklist_lock);
54 do_each_thread(g, p) { 55 do_each_thread(g, p) {
55 if (frozen(p) || !freezeable(p)) 56 if (frozen(p) || !freezable(p))
56 continue; 57 continue;
57 58
58 if (!freeze_task(p, sig_only)) 59 if (!freeze_task(p, sig_only))
@@ -63,6 +64,12 @@ static int try_to_freeze_tasks(bool sig_only)
63 * perturb a task in TASK_STOPPED or TASK_TRACED. 64 * perturb a task in TASK_STOPPED or TASK_TRACED.
64 * It is "frozen enough". If the task does wake 65 * It is "frozen enough". If the task does wake
65 * up, it will immediately call try_to_freeze. 66 * up, it will immediately call try_to_freeze.
67 *
68 * Because freeze_task() goes through p's
69 * scheduler lock after setting TIF_FREEZE, it's
70 * guaranteed that either we see TASK_RUNNING or
71 * try_to_stop() after schedule() in ptrace/signal
72 * stop sees TIF_FREEZE.
66 */ 73 */
67 if (!task_is_stopped_or_traced(p) && 74 if (!task_is_stopped_or_traced(p) &&
68 !freezer_should_skip(p)) 75 !freezer_should_skip(p))
@@ -78,6 +85,11 @@ static int try_to_freeze_tasks(bool sig_only)
78 if (!todo || time_after(jiffies, end_time)) 85 if (!todo || time_after(jiffies, end_time))
79 break; 86 break;
80 87
88 if (pm_wakeup_pending()) {
89 wakeup = true;
90 break;
91 }
92
81 /* 93 /*
82 * We need to retry, but first give the freezing tasks some 94 * We need to retry, but first give the freezing tasks some
 83 * time to enter the refrigerator. 95 * time to enter the refrigerator.
@@ -97,8 +109,9 @@ static int try_to_freeze_tasks(bool sig_only)
97 * but it cleans up leftover PF_FREEZE requests. 109 * but it cleans up leftover PF_FREEZE requests.
98 */ 110 */
99 printk("\n"); 111 printk("\n");
100 printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds " 112 printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds "
101 "(%d tasks refusing to freeze, wq_busy=%d):\n", 113 "(%d tasks refusing to freeze, wq_busy=%d):\n",
114 wakeup ? "aborted" : "failed",
102 elapsed_csecs / 100, elapsed_csecs % 100, 115 elapsed_csecs / 100, elapsed_csecs % 100,
103 todo - wq_busy, wq_busy); 116 todo - wq_busy, wq_busy);
104 117
@@ -107,7 +120,7 @@ static int try_to_freeze_tasks(bool sig_only)
107 read_lock(&tasklist_lock); 120 read_lock(&tasklist_lock);
108 do_each_thread(g, p) { 121 do_each_thread(g, p) {
109 task_lock(p); 122 task_lock(p);
110 if (freezing(p) && !freezer_should_skip(p)) 123 if (!wakeup && freezing(p) && !freezer_should_skip(p))
111 sched_show_task(p); 124 sched_show_task(p);
112 cancel_freezing(p); 125 cancel_freezing(p);
113 task_unlock(p); 126 task_unlock(p);
@@ -154,7 +167,7 @@ static void thaw_tasks(bool nosig_only)
154 167
155 read_lock(&tasklist_lock); 168 read_lock(&tasklist_lock);
156 do_each_thread(g, p) { 169 do_each_thread(g, p) {
157 if (!freezeable(p)) 170 if (!freezable(p))
158 continue; 171 continue;
159 172
160 if (nosig_only && should_send_signal(p)) 173 if (nosig_only && should_send_signal(p))
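
Two behavioural changes land in try_to_freeze_tasks(): a pending wakeup event now aborts the freeze early instead of burning through the 20 s timeout, and the backtrace dump of unfrozen tasks is suppressed when the abort was deliberate. A condensed sketch of the resulting loop (count_unfrozen_tasks() and wait_a_bit() are hypothetical stand-ins for the tasklist walk and the short sleep in the real code):

bool wakeup = false;
unsigned int todo;

for (;;) {
	todo = count_unfrozen_tasks();       /* hypothetical helper */
	if (!todo || time_after(jiffies, end_time))
		break;
	if (pm_wakeup_pending()) {           /* new: a wakeup event arrived */
		wakeup = true;               /* abort rather than time out */
		break;
	}
	wait_a_bit();                        /* hypothetical: let tasks freeze */
}
if (todo)
	printk(KERN_ERR "Freezing of tasks %s ...\n",
	       wakeup ? "aborted" : "failed");
/* per-task backtraces are printed only when !wakeup */
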
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index d3f795f01bbc..06efa54f93d6 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -41,12 +41,29 @@ static void swsusp_set_page_forbidden(struct page *);
41static void swsusp_unset_page_forbidden(struct page *); 41static void swsusp_unset_page_forbidden(struct page *);
42 42
43/* 43/*
44 * Number of bytes to reserve for memory allocations made by device drivers
45 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
46 * cause image creation to fail (tunable via /sys/power/reserved_size).
47 */
48unsigned long reserved_size;
49
50void __init hibernate_reserved_size_init(void)
51{
52 reserved_size = SPARE_PAGES * PAGE_SIZE;
53}
54
55/*
44 * Preferred image size in bytes (tunable via /sys/power/image_size). 56 * Preferred image size in bytes (tunable via /sys/power/image_size).
45 * When it is set to N, swsusp will do its best to ensure the image 57 * When it is set to N, swsusp will do its best to ensure the image
46 * size will not exceed N bytes, but if that is impossible, it will 58 * size will not exceed N bytes, but if that is impossible, it will
47 * try to create the smallest image possible. 59 * try to create the smallest image possible.
48 */ 60 */
49unsigned long image_size = 500 * 1024 * 1024; 61unsigned long image_size;
62
63void __init hibernate_image_size_init(void)
64{
65 image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
66}
50 67
51/* List of PBEs needed for restoring the pages that were allocated before 68/* List of PBEs needed for restoring the pages that were allocated before
52 * the suspend and included in the suspend image, but have also been 69 * the suspend and included in the suspend image, but have also been
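
hibernate_image_size_init() above replaces the fixed 500 MB default with two fifths of RAM, and hibernate_reserved_size_init() seeds reserved_size from SPARE_PAGES. On a 4 GiB machine with 4 KiB pages the image target works out to roughly 1.6 GiB; a userspace check of the arithmetic, under exactly those assumptions:

#include <stdio.h>

int main(void)
{
	unsigned long totalram_pages = 1048576UL; /* assumed: 4 GiB of 4 KiB pages */
	unsigned long page_size = 4096UL;
	unsigned long image_size = ((totalram_pages * 2) / 5) * page_size;

	printf("default image_size = %lu bytes (~%lu MiB)\n",
	       image_size, image_size >> 20);
	return 0;
}
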
@@ -979,8 +996,8 @@ static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
979 src = kmap_atomic(s_page, KM_USER0); 996 src = kmap_atomic(s_page, KM_USER0);
980 dst = kmap_atomic(d_page, KM_USER1); 997 dst = kmap_atomic(d_page, KM_USER1);
981 do_copy_page(dst, src); 998 do_copy_page(dst, src);
982 kunmap_atomic(src, KM_USER0);
983 kunmap_atomic(dst, KM_USER1); 999 kunmap_atomic(dst, KM_USER1);
1000 kunmap_atomic(src, KM_USER0);
984 } else { 1001 } else {
985 if (PageHighMem(d_page)) { 1002 if (PageHighMem(d_page)) {
986 /* Page pointed to by src may contain some kernel 1003 /* Page pointed to by src may contain some kernel
@@ -988,7 +1005,7 @@ static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
988 */ 1005 */
989 safe_copy_page(buffer, s_page); 1006 safe_copy_page(buffer, s_page);
990 dst = kmap_atomic(d_page, KM_USER0); 1007 dst = kmap_atomic(d_page, KM_USER0);
991 memcpy(dst, buffer, PAGE_SIZE); 1008 copy_page(dst, buffer);
992 kunmap_atomic(dst, KM_USER0); 1009 kunmap_atomic(dst, KM_USER0);
993 } else { 1010 } else {
994 safe_copy_page(page_address(d_page), s_page); 1011 safe_copy_page(page_address(d_page), s_page);
@@ -1194,7 +1211,11 @@ static void free_unnecessary_pages(void)
1194 to_free_highmem = alloc_highmem - save; 1211 to_free_highmem = alloc_highmem - save;
1195 } else { 1212 } else {
1196 to_free_highmem = 0; 1213 to_free_highmem = 0;
1197 to_free_normal -= save - alloc_highmem; 1214 save -= alloc_highmem;
1215 if (to_free_normal > save)
1216 to_free_normal -= save;
1217 else
1218 to_free_normal = 0;
1198 } 1219 }
1199 1220
1200 memory_bm_position_reset(&copy_bm); 1221 memory_bm_position_reset(&copy_bm);
@@ -1258,11 +1279,13 @@ static unsigned long minimum_image_size(unsigned long saveable)
1258 * frame in use. We also need a number of page frames to be free during 1279 * frame in use. We also need a number of page frames to be free during
1259 * hibernation for allocations made while saving the image and for device 1280 * hibernation for allocations made while saving the image and for device
1260 * drivers, in case they need to allocate memory from their hibernation 1281 * drivers, in case they need to allocate memory from their hibernation
1261 * callbacks (these two numbers are given by PAGES_FOR_IO and SPARE_PAGES, 1282 * callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough
1262 * respectively, both of which are rough estimates). To make this happen, we 1283 * estimate) and reserved_size divided by PAGE_SIZE (which is tunable through
1263 * compute the total number of available page frames and allocate at least 1284 * /sys/power/reserved_size, respectively). To make this happen, we compute the
1285 * total number of available page frames and allocate at least
1264 * 1286 *
1265 * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2 + 2 * SPARE_PAGES 1287 * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
1288 * + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
1266 * 1289 *
1267 * of them, which corresponds to the maximum size of a hibernation image. 1290 * of them, which corresponds to the maximum size of a hibernation image.
1268 * 1291 *
@@ -1317,13 +1340,16 @@ int hibernate_preallocate_memory(void)
1317 count -= totalreserve_pages; 1340 count -= totalreserve_pages;
1318 1341
1319 /* Compute the maximum number of saveable pages to leave in memory. */ 1342 /* Compute the maximum number of saveable pages to leave in memory. */
1320 max_size = (count - (size + PAGES_FOR_IO)) / 2 - 2 * SPARE_PAGES; 1343 max_size = (count - (size + PAGES_FOR_IO)) / 2
1344 - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
1345 /* Compute the desired number of image pages specified by image_size. */
1321 size = DIV_ROUND_UP(image_size, PAGE_SIZE); 1346 size = DIV_ROUND_UP(image_size, PAGE_SIZE);
1322 if (size > max_size) 1347 if (size > max_size)
1323 size = max_size; 1348 size = max_size;
1324 /* 1349 /*
1325 * If the maximum is not less than the current number of saveable pages 1350 * If the desired number of image pages is at least as large as the
1326 * in memory, allocate page frames for the image and we're done. 1351 * current number of saveable pages in memory, allocate page frames for
1352 * the image and we're done.
1327 */ 1353 */
1328 if (size >= saveable) { 1354 if (size >= saveable) {
1329 pages = preallocate_image_highmem(save_highmem); 1355 pages = preallocate_image_highmem(save_highmem);
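
The preallocation bound now subtracts the tunable reservation instead of a hard-coded 2 * SPARE_PAGES. A small userspace sketch of the new formula (the concrete numbers are made up; PAGES_FOR_IO of 1024 pages assumes 4 KiB pages and is taken from this era's headers, so treat it as an assumption):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long count = 1000000UL;      /* assumed: usable page frames */
	unsigned long size = 500UL;           /* assumed: metadata pages */
	unsigned long pages_for_io = 1024UL;  /* assumed PAGES_FOR_IO */
	unsigned long reserved_size = 256 * PAGE_SIZE; /* default reservation */
	unsigned long max_size;

	max_size = (count - (size + pages_for_io)) / 2
		   - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
	printf("max image size = %lu pages\n", max_size);
	return 0;
}
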
@@ -1512,11 +1538,8 @@ static int
1512swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm, 1538swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
1513 unsigned int nr_pages, unsigned int nr_highmem) 1539 unsigned int nr_pages, unsigned int nr_highmem)
1514{ 1540{
1515 int error = 0;
1516
1517 if (nr_highmem > 0) { 1541 if (nr_highmem > 0) {
1518 error = get_highmem_buffer(PG_ANY); 1542 if (get_highmem_buffer(PG_ANY))
1519 if (error)
1520 goto err_out; 1543 goto err_out;
1521 if (nr_highmem > alloc_highmem) { 1544 if (nr_highmem > alloc_highmem) {
1522 nr_highmem -= alloc_highmem; 1545 nr_highmem -= alloc_highmem;
@@ -1539,7 +1562,7 @@ swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
1539 1562
1540 err_out: 1563 err_out:
1541 swsusp_free(); 1564 swsusp_free();
1542 return error; 1565 return -ENOMEM;
1543} 1566}
1544 1567
1545asmlinkage int swsusp_save(void) 1568asmlinkage int swsusp_save(void)
@@ -1680,7 +1703,7 @@ int snapshot_read_next(struct snapshot_handle *handle)
1680 memory_bm_position_reset(&orig_bm); 1703 memory_bm_position_reset(&orig_bm);
1681 memory_bm_position_reset(&copy_bm); 1704 memory_bm_position_reset(&copy_bm);
1682 } else if (handle->cur <= nr_meta_pages) { 1705 } else if (handle->cur <= nr_meta_pages) {
1683 memset(buffer, 0, PAGE_SIZE); 1706 clear_page(buffer);
1684 pack_pfns(buffer, &orig_bm); 1707 pack_pfns(buffer, &orig_bm);
1685 } else { 1708 } else {
1686 struct page *page; 1709 struct page *page;
@@ -1694,7 +1717,7 @@ int snapshot_read_next(struct snapshot_handle *handle)
1694 void *kaddr; 1717 void *kaddr;
1695 1718
1696 kaddr = kmap_atomic(page, KM_USER0); 1719 kaddr = kmap_atomic(page, KM_USER0);
1697 memcpy(buffer, kaddr, PAGE_SIZE); 1720 copy_page(buffer, kaddr);
1698 kunmap_atomic(kaddr, KM_USER0); 1721 kunmap_atomic(kaddr, KM_USER0);
1699 handle->buffer = buffer; 1722 handle->buffer = buffer;
1700 } else { 1723 } else {
@@ -1977,7 +2000,7 @@ static void copy_last_highmem_page(void)
1977 void *dst; 2000 void *dst;
1978 2001
1979 dst = kmap_atomic(last_highmem_page, KM_USER0); 2002 dst = kmap_atomic(last_highmem_page, KM_USER0);
1980 memcpy(dst, buffer, PAGE_SIZE); 2003 copy_page(dst, buffer);
1981 kunmap_atomic(dst, KM_USER0); 2004 kunmap_atomic(dst, KM_USER0);
1982 last_highmem_page = NULL; 2005 last_highmem_page = NULL;
1983 } 2006 }
@@ -2263,11 +2286,11 @@ swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
2263 2286
2264 kaddr1 = kmap_atomic(p1, KM_USER0); 2287 kaddr1 = kmap_atomic(p1, KM_USER0);
2265 kaddr2 = kmap_atomic(p2, KM_USER1); 2288 kaddr2 = kmap_atomic(p2, KM_USER1);
2266 memcpy(buf, kaddr1, PAGE_SIZE); 2289 copy_page(buf, kaddr1);
2267 memcpy(kaddr1, kaddr2, PAGE_SIZE); 2290 copy_page(kaddr1, kaddr2);
2268 memcpy(kaddr2, buf, PAGE_SIZE); 2291 copy_page(kaddr2, buf);
2269 kunmap_atomic(kaddr1, KM_USER0);
2270 kunmap_atomic(kaddr2, KM_USER1); 2292 kunmap_atomic(kaddr2, KM_USER1);
2293 kunmap_atomic(kaddr1, KM_USER0);
2271} 2294}
2272 2295
2273/** 2296/**
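
Two mechanical cleanups recur through snapshot.c above: kunmap_atomic() calls are reordered so the KM_USER1 mapping is released before KM_USER0, matching the stack discipline of atomic kmap slots, and page-sized memcpy()/memset() calls become copy_page()/clear_page(), which architectures may implement with faster page-granular primitives. The required pairing, as in swap_two_pages_data():

kaddr1 = kmap_atomic(p1, KM_USER0);   /* push slot 0 */
kaddr2 = kmap_atomic(p2, KM_USER1);   /* push slot 1 */
copy_page(buf, kaddr1);
copy_page(kaddr1, kaddr2);
copy_page(kaddr2, buf);
kunmap_atomic(kaddr2, KM_USER1);      /* pop slot 1 first */
kunmap_atomic(kaddr1, KM_USER0);      /* pop slot 0 last */
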
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 7335952ee473..1c41ba215419 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -22,6 +22,8 @@
22#include <linux/mm.h> 22#include <linux/mm.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/suspend.h> 24#include <linux/suspend.h>
25#include <linux/syscore_ops.h>
26#include <trace/events/power.h>
25 27
26#include "power.h" 28#include "power.h"
27 29
@@ -30,13 +32,13 @@ const char *const pm_states[PM_SUSPEND_MAX] = {
30 [PM_SUSPEND_MEM] = "mem", 32 [PM_SUSPEND_MEM] = "mem",
31}; 33};
32 34
33static struct platform_suspend_ops *suspend_ops; 35static const struct platform_suspend_ops *suspend_ops;
34 36
35/** 37/**
36 * suspend_set_ops - Set the global suspend method table. 38 * suspend_set_ops - Set the global suspend method table.
37 * @ops: Pointer to ops structure. 39 * @ops: Pointer to ops structure.
38 */ 40 */
39void suspend_set_ops(struct platform_suspend_ops *ops) 41void suspend_set_ops(const struct platform_suspend_ops *ops)
40{ 42{
41 mutex_lock(&pm_mutex); 43 mutex_lock(&pm_mutex);
42 suspend_ops = ops; 44 suspend_ops = ops;
@@ -161,13 +163,13 @@ static int suspend_enter(suspend_state_t state)
161 arch_suspend_disable_irqs(); 163 arch_suspend_disable_irqs();
162 BUG_ON(!irqs_disabled()); 164 BUG_ON(!irqs_disabled());
163 165
164 error = sysdev_suspend(PMSG_SUSPEND); 166 error = syscore_suspend();
165 if (!error) { 167 if (!error) {
166 if (!suspend_test(TEST_CORE) && pm_check_wakeup_events()) { 168 if (!(suspend_test(TEST_CORE) || pm_wakeup_pending())) {
167 error = suspend_ops->enter(state); 169 error = suspend_ops->enter(state);
168 events_check_enabled = false; 170 events_check_enabled = false;
169 } 171 }
170 sysdev_resume(); 172 syscore_resume();
171 } 173 }
172 174
173 arch_suspend_enable_irqs(); 175 arch_suspend_enable_irqs();
@@ -197,18 +199,17 @@ static int suspend_enter(suspend_state_t state)
197int suspend_devices_and_enter(suspend_state_t state) 199int suspend_devices_and_enter(suspend_state_t state)
198{ 200{
199 int error; 201 int error;
200 gfp_t saved_mask;
201 202
202 if (!suspend_ops) 203 if (!suspend_ops)
203 return -ENOSYS; 204 return -ENOSYS;
204 205
206 trace_machine_suspend(state);
205 if (suspend_ops->begin) { 207 if (suspend_ops->begin) {
206 error = suspend_ops->begin(state); 208 error = suspend_ops->begin(state);
207 if (error) 209 if (error)
208 goto Close; 210 goto Close;
209 } 211 }
210 suspend_console(); 212 suspend_console();
211 saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
212 suspend_test_start(); 213 suspend_test_start();
213 error = dpm_suspend_start(PMSG_SUSPEND); 214 error = dpm_suspend_start(PMSG_SUSPEND);
214 if (error) { 215 if (error) {
@@ -219,17 +220,17 @@ int suspend_devices_and_enter(suspend_state_t state)
219 if (suspend_test(TEST_DEVICES)) 220 if (suspend_test(TEST_DEVICES))
220 goto Recover_platform; 221 goto Recover_platform;
221 222
222 suspend_enter(state); 223 error = suspend_enter(state);
223 224
224 Resume_devices: 225 Resume_devices:
225 suspend_test_start(); 226 suspend_test_start();
226 dpm_resume_end(PMSG_RESUME); 227 dpm_resume_end(PMSG_RESUME);
227 suspend_test_finish("resume devices"); 228 suspend_test_finish("resume devices");
228 set_gfp_allowed_mask(saved_mask);
229 resume_console(); 229 resume_console();
230 Close: 230 Close:
231 if (suspend_ops->end) 231 if (suspend_ops->end)
232 suspend_ops->end(); 232 suspend_ops->end();
233 trace_machine_suspend(PWR_EVENT_EXIT);
233 return error; 234 return error;
234 235
235 Recover_platform: 236 Recover_platform:
@@ -285,7 +286,9 @@ int enter_state(suspend_state_t state)
285 goto Finish; 286 goto Finish;
286 287
287 pr_debug("PM: Entering %s sleep\n", pm_states[state]); 288 pr_debug("PM: Entering %s sleep\n", pm_states[state]);
289 pm_restrict_gfp_mask();
288 error = suspend_devices_and_enter(state); 290 error = suspend_devices_and_enter(state);
291 pm_restore_gfp_mask();
289 292
290 Finish: 293 Finish:
291 pr_debug("PM: Finishing wakeup.\n"); 294 pr_debug("PM: Finishing wakeup.\n");
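
Besides the pm_restrict_gfp_mask()/pm_restore_gfp_mask() bracketing that replaces the open-coded saved_mask dance, suspend_enter() now calls syscore_suspend()/syscore_resume() in place of the sysdev pair, so code needing late, interrupts-off PM hooks registers a struct syscore_ops. A minimal, hypothetical registration (the foo_* names are illustrative):

#include <linux/syscore_ops.h>

static int foo_syscore_suspend(void)
{
	/* runs with IRQs off, after devices are suspended */
	return 0;
}

static void foo_syscore_resume(void)
{
	/* runs with IRQs off, before devices resume */
}

static struct syscore_ops foo_syscore_ops = {
	.suspend = foo_syscore_suspend,
	.resume  = foo_syscore_resume,
};

static int __init foo_init(void)
{
	register_syscore_ops(&foo_syscore_ops);
	return 0;
}
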
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index e6a5bdf61a37..7c97c3a0eee3 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -6,6 +6,7 @@
6 * 6 *
7 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz> 7 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
8 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl> 8 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
9 * Copyright (C) 2010 Bojan Smojver <bojan@rexursive.com>
9 * 10 *
10 * This file is released under the GPLv2. 11 * This file is released under the GPLv2.
11 * 12 *
@@ -24,10 +25,12 @@
24#include <linux/swapops.h> 25#include <linux/swapops.h>
25#include <linux/pm.h> 26#include <linux/pm.h>
26#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/lzo.h>
29#include <linux/vmalloc.h>
27 30
28#include "power.h" 31#include "power.h"
29 32
30#define SWSUSP_SIG "S1SUSPEND" 33#define HIBERNATE_SIG "S1SUSPEND"
31 34
32/* 35/*
33 * The swap map is a data structure used for keeping track of each page 36 * The swap map is a data structure used for keeping track of each page
@@ -193,7 +196,7 @@ static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
193 if (!memcmp("SWAP-SPACE",swsusp_header->sig, 10) || 196 if (!memcmp("SWAP-SPACE",swsusp_header->sig, 10) ||
194 !memcmp("SWAPSPACE2",swsusp_header->sig, 10)) { 197 !memcmp("SWAPSPACE2",swsusp_header->sig, 10)) {
195 memcpy(swsusp_header->orig_sig,swsusp_header->sig, 10); 198 memcpy(swsusp_header->orig_sig,swsusp_header->sig, 10);
196 memcpy(swsusp_header->sig,SWSUSP_SIG, 10); 199 memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
197 swsusp_header->image = handle->first_sector; 200 swsusp_header->image = handle->first_sector;
198 swsusp_header->flags = flags; 201 swsusp_header->flags = flags;
199 error = hib_bio_write_page(swsusp_resume_block, 202 error = hib_bio_write_page(swsusp_resume_block,
@@ -221,7 +224,7 @@ static int swsusp_swap_check(void)
221 return res; 224 return res;
222 225
223 root_swap = res; 226 root_swap = res;
224 res = blkdev_get(hib_resume_bdev, FMODE_WRITE); 227 res = blkdev_get(hib_resume_bdev, FMODE_WRITE, NULL);
225 if (res) 228 if (res)
226 return res; 229 return res;
227 230
@@ -249,7 +252,7 @@ static int write_page(void *buf, sector_t offset, struct bio **bio_chain)
249 if (bio_chain) { 252 if (bio_chain) {
250 src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH); 253 src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
251 if (src) { 254 if (src) {
252 memcpy(src, buf, PAGE_SIZE); 255 copy_page(src, buf);
253 } else { 256 } else {
254 WARN_ON_ONCE(1); 257 WARN_ON_ONCE(1);
255 bio_chain = NULL; /* Go synchronous */ 258 bio_chain = NULL; /* Go synchronous */
@@ -323,7 +326,7 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf,
323 error = write_page(handle->cur, handle->cur_swap, NULL); 326 error = write_page(handle->cur, handle->cur_swap, NULL);
324 if (error) 327 if (error)
325 goto out; 328 goto out;
326 memset(handle->cur, 0, PAGE_SIZE); 329 clear_page(handle->cur);
327 handle->cur_swap = offset; 330 handle->cur_swap = offset;
328 handle->k = 0; 331 handle->k = 0;
329 } 332 }
@@ -357,6 +360,18 @@ static int swap_writer_finish(struct swap_map_handle *handle,
357 return error; 360 return error;
358} 361}
359 362
363/* We need to remember how much compressed data we need to read. */
364#define LZO_HEADER sizeof(size_t)
365
366/* Number of pages/bytes we'll compress at one time. */
367#define LZO_UNC_PAGES 32
368#define LZO_UNC_SIZE (LZO_UNC_PAGES * PAGE_SIZE)
369
370/* Number of pages/bytes we need for compressed data (worst case). */
371#define LZO_CMP_PAGES DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
372 LZO_HEADER, PAGE_SIZE)
373#define LZO_CMP_SIZE (LZO_CMP_PAGES * PAGE_SIZE)
374
360/** 375/**
361 * save_image - save the suspend image data 376 * save_image - save the suspend image data
362 */ 377 */
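
For 4 KiB pages these macros make each compression unit 32 pages (131072 bytes); lzo1x_worst_compress(x) expands to x + x/16 + 64 + 3, giving 139331 bytes, so with an 8-byte size_t header the worst-case chunk occupies 35 pages. A userspace check of the sizing, assuming a 64-bit size_t:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3)

#define LZO_HEADER    sizeof(size_t)
#define LZO_UNC_PAGES 32
#define LZO_UNC_SIZE  (LZO_UNC_PAGES * PAGE_SIZE)
#define LZO_CMP_PAGES DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
				   LZO_HEADER, PAGE_SIZE)

int main(void)
{
	printf("unc chunk: %lu bytes, worst-case cmp chunk: %lu pages\n",
	       LZO_UNC_SIZE, (unsigned long)LZO_CMP_PAGES);
	return 0;
}
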
@@ -404,6 +419,137 @@ static int save_image(struct swap_map_handle *handle,
404 return ret; 419 return ret;
405} 420}
406 421
422
423/**
424 * save_image_lzo - Save the suspend image data compressed with LZO.
425 * @handle: Swap map handle to use for saving the image.
426 * @snapshot: Image to read data from.
427 * @nr_to_write: Number of pages to save.
428 */
429static int save_image_lzo(struct swap_map_handle *handle,
430 struct snapshot_handle *snapshot,
431 unsigned int nr_to_write)
432{
433 unsigned int m;
434 int ret = 0;
435 int nr_pages;
436 int err2;
437 struct bio *bio;
438 struct timeval start;
439 struct timeval stop;
440 size_t off, unc_len, cmp_len;
441 unsigned char *unc, *cmp, *wrk, *page;
442
443 page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
444 if (!page) {
445 printk(KERN_ERR "PM: Failed to allocate LZO page\n");
446 return -ENOMEM;
447 }
448
449 wrk = vmalloc(LZO1X_1_MEM_COMPRESS);
450 if (!wrk) {
451 printk(KERN_ERR "PM: Failed to allocate LZO workspace\n");
452 free_page((unsigned long)page);
453 return -ENOMEM;
454 }
455
456 unc = vmalloc(LZO_UNC_SIZE);
457 if (!unc) {
458 printk(KERN_ERR "PM: Failed to allocate LZO uncompressed\n");
459 vfree(wrk);
460 free_page((unsigned long)page);
461 return -ENOMEM;
462 }
463
464 cmp = vmalloc(LZO_CMP_SIZE);
465 if (!cmp) {
466 printk(KERN_ERR "PM: Failed to allocate LZO compressed\n");
467 vfree(unc);
468 vfree(wrk);
469 free_page((unsigned long)page);
470 return -ENOMEM;
471 }
472
473 printk(KERN_INFO
474 "PM: Compressing and saving image data (%u pages) ... ",
475 nr_to_write);
476 m = nr_to_write / 100;
477 if (!m)
478 m = 1;
479 nr_pages = 0;
480 bio = NULL;
481 do_gettimeofday(&start);
482 for (;;) {
483 for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
484 ret = snapshot_read_next(snapshot);
485 if (ret < 0)
486 goto out_finish;
487
488 if (!ret)
489 break;
490
491 memcpy(unc + off, data_of(*snapshot), PAGE_SIZE);
492
493 if (!(nr_pages % m))
494 printk(KERN_CONT "\b\b\b\b%3d%%", nr_pages / m);
495 nr_pages++;
496 }
497
498 if (!off)
499 break;
500
501 unc_len = off;
502 ret = lzo1x_1_compress(unc, unc_len,
503 cmp + LZO_HEADER, &cmp_len, wrk);
504 if (ret < 0) {
505 printk(KERN_ERR "PM: LZO compression failed\n");
506 break;
507 }
508
509 if (unlikely(!cmp_len ||
510 cmp_len > lzo1x_worst_compress(unc_len))) {
511 printk(KERN_ERR "PM: Invalid LZO compressed length\n");
512 ret = -1;
513 break;
514 }
515
516 *(size_t *)cmp = cmp_len;
517
518 /*
519 * Given we are writing one page at a time to disk, we copy
520 * that much from the buffer, although the last bit will likely
521 * be smaller than a full page. This is OK - we saved the length
522 * of the compressed data, so any garbage at the end will be
523 * discarded when we read it.
524 */
525 for (off = 0; off < LZO_HEADER + cmp_len; off += PAGE_SIZE) {
526 memcpy(page, cmp + off, PAGE_SIZE);
527
528 ret = swap_write_page(handle, page, &bio);
529 if (ret)
530 goto out_finish;
531 }
532 }
533
534out_finish:
535 err2 = hib_wait_on_bio_chain(&bio);
536 do_gettimeofday(&stop);
537 if (!ret)
538 ret = err2;
539 if (!ret)
540 printk(KERN_CONT "\b\b\b\bdone\n");
541 else
542 printk(KERN_CONT "\n");
543 swsusp_show_speed(&start, &stop, nr_to_write, "Wrote");
544
545 vfree(cmp);
546 vfree(unc);
547 vfree(wrk);
548 free_page((unsigned long)page);
549
550 return ret;
551}
552
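
The on-disk format save_image_lzo() produces is simply a size_t length header followed by the LZO payload, rounded up to whole pages; the reader ignores the tail padding because it knows cmp_len. A userspace model of just the framing (no real compression involved):

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE  4096
#define LZO_HEADER sizeof(size_t)

static unsigned char cmp[LZO_HEADER + 8 * PAGE_SIZE];

/* Model: frame a payload the way save_image_lzo() lays out a chunk. */
static void write_chunk(const unsigned char *payload, size_t cmp_len)
{
	size_t off;

	memcpy(cmp, &cmp_len, sizeof(cmp_len));      /* length header */
	memcpy(cmp + LZO_HEADER, payload, cmp_len);  /* compressed bytes */

	for (off = 0; off < LZO_HEADER + cmp_len; off += PAGE_SIZE)
		printf("write page at offset %zu\n", off);
}

int main(void)
{
	static unsigned char payload[10000];

	write_chunk(payload, sizeof(payload)); /* 10008 bytes -> 3 pages */
	return 0;
}
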
407/** 553/**
408 * enough_swap - Make sure we have enough swap to save the image. 554 * enough_swap - Make sure we have enough swap to save the image.
409 * 555 *
@@ -411,12 +557,16 @@ static int save_image(struct swap_map_handle *handle,
411 * space available from the resume partition. 557 * space available from the resume partition.
412 */ 558 */
413 559
414static int enough_swap(unsigned int nr_pages) 560static int enough_swap(unsigned int nr_pages, unsigned int flags)
415{ 561{
416 unsigned int free_swap = count_swap_pages(root_swap, 1); 562 unsigned int free_swap = count_swap_pages(root_swap, 1);
563 unsigned int required;
417 564
418 pr_debug("PM: Free swap pages: %u\n", free_swap); 565 pr_debug("PM: Free swap pages: %u\n", free_swap);
419 return free_swap > nr_pages + PAGES_FOR_IO; 566
567 required = PAGES_FOR_IO + ((flags & SF_NOCOMPRESS_MODE) ?
568 nr_pages : (nr_pages * LZO_CMP_PAGES) / LZO_UNC_PAGES + 1);
569 return free_swap > required;
420} 570}
421 571
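
The compressed branch budgets swap for the worst case: every LZO_UNC_PAGES-page chunk may expand to LZO_CMP_PAGES pages, roughly a 9% overhead with the 35/32 ratio derived earlier. A quick check with made-up numbers (1024 for PAGES_FOR_IO is again an assumption):

#include <stdio.h>

int main(void)
{
	unsigned int nr_pages = 100000;    /* assumed image size in pages */
	unsigned int pages_for_io = 1024;  /* assumed PAGES_FOR_IO */
	unsigned int required;

	required = pages_for_io + (nr_pages * 35u) / 32u + 1;
	printf("worst-case swap needed: %u pages (~%u MiB)\n",
	       required, required / 256);
	return 0;
}
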
422/** 572/**
@@ -443,7 +593,7 @@ int swsusp_write(unsigned int flags)
443 printk(KERN_ERR "PM: Cannot get swap writer\n"); 593 printk(KERN_ERR "PM: Cannot get swap writer\n");
444 return error; 594 return error;
445 } 595 }
446 if (!enough_swap(pages)) { 596 if (!enough_swap(pages, flags)) {
447 printk(KERN_ERR "PM: Not enough free swap\n"); 597 printk(KERN_ERR "PM: Not enough free swap\n");
448 error = -ENOSPC; 598 error = -ENOSPC;
449 goto out_finish; 599 goto out_finish;
@@ -458,8 +608,11 @@ int swsusp_write(unsigned int flags)
458 } 608 }
459 header = (struct swsusp_info *)data_of(snapshot); 609 header = (struct swsusp_info *)data_of(snapshot);
460 error = swap_write_page(&handle, header, NULL); 610 error = swap_write_page(&handle, header, NULL);
461 if (!error) 611 if (!error) {
462 error = save_image(&handle, &snapshot, pages - 1); 612 error = (flags & SF_NOCOMPRESS_MODE) ?
613 save_image(&handle, &snapshot, pages - 1) :
614 save_image_lzo(&handle, &snapshot, pages - 1);
615 }
463out_finish: 616out_finish:
464 error = swap_writer_finish(&handle, flags, error); 617 error = swap_writer_finish(&handle, flags, error);
465 return error; 618 return error;
@@ -590,9 +743,152 @@ static int load_image(struct swap_map_handle *handle,
590} 743}
591 744
592/** 745/**
746 * load_image_lzo - Load compressed image data and decompress them with LZO.
747 * @handle: Swap map handle to use for loading data.
748 * @snapshot: Image to copy uncompressed data into.
749 * @nr_to_read: Number of pages to load.
750 */
751static int load_image_lzo(struct swap_map_handle *handle,
752 struct snapshot_handle *snapshot,
753 unsigned int nr_to_read)
754{
755 unsigned int m;
756 int error = 0;
757 struct bio *bio;
758 struct timeval start;
759 struct timeval stop;
760 unsigned nr_pages;
761 size_t i, off, unc_len, cmp_len;
762 unsigned char *unc, *cmp, *page[LZO_CMP_PAGES];
763
764 for (i = 0; i < LZO_CMP_PAGES; i++) {
765 page[i] = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
766 if (!page[i]) {
767 printk(KERN_ERR "PM: Failed to allocate LZO page\n");
768
769 while (i)
770 free_page((unsigned long)page[--i]);
771
772 return -ENOMEM;
773 }
774 }
775
776 unc = vmalloc(LZO_UNC_SIZE);
777 if (!unc) {
778 printk(KERN_ERR "PM: Failed to allocate LZO uncompressed\n");
779
780 for (i = 0; i < LZO_CMP_PAGES; i++)
781 free_page((unsigned long)page[i]);
782
783 return -ENOMEM;
784 }
785
786 cmp = vmalloc(LZO_CMP_SIZE);
787 if (!cmp) {
788 printk(KERN_ERR "PM: Failed to allocate LZO compressed\n");
789
790 vfree(unc);
791 for (i = 0; i < LZO_CMP_PAGES; i++)
792 free_page((unsigned long)page[i]);
793
794 return -ENOMEM;
795 }
796
797 printk(KERN_INFO
798 "PM: Loading and decompressing image data (%u pages) ... ",
799 nr_to_read);
800 m = nr_to_read / 100;
801 if (!m)
802 m = 1;
803 nr_pages = 0;
804 bio = NULL;
805 do_gettimeofday(&start);
806
807 error = snapshot_write_next(snapshot);
808 if (error <= 0)
809 goto out_finish;
810
811 for (;;) {
812 error = swap_read_page(handle, page[0], NULL); /* sync */
813 if (error)
814 break;
815
816 cmp_len = *(size_t *)page[0];
817 if (unlikely(!cmp_len ||
818 cmp_len > lzo1x_worst_compress(LZO_UNC_SIZE))) {
819 printk(KERN_ERR "PM: Invalid LZO compressed length\n");
820 error = -1;
821 break;
822 }
823
824 for (off = PAGE_SIZE, i = 1;
825 off < LZO_HEADER + cmp_len; off += PAGE_SIZE, i++) {
826 error = swap_read_page(handle, page[i], &bio);
827 if (error)
828 goto out_finish;
829 }
830
831 error = hib_wait_on_bio_chain(&bio); /* need all data now */
832 if (error)
833 goto out_finish;
834
835 for (off = 0, i = 0;
836 off < LZO_HEADER + cmp_len; off += PAGE_SIZE, i++) {
837 memcpy(cmp + off, page[i], PAGE_SIZE);
838 }
839
840 unc_len = LZO_UNC_SIZE;
841 error = lzo1x_decompress_safe(cmp + LZO_HEADER, cmp_len,
842 unc, &unc_len);
843 if (error < 0) {
844 printk(KERN_ERR "PM: LZO decompression failed\n");
845 break;
846 }
847
848 if (unlikely(!unc_len ||
849 unc_len > LZO_UNC_SIZE ||
850 unc_len & (PAGE_SIZE - 1))) {
851 printk(KERN_ERR "PM: Invalid LZO uncompressed length\n");
852 error = -1;
853 break;
854 }
855
856 for (off = 0; off < unc_len; off += PAGE_SIZE) {
857 memcpy(data_of(*snapshot), unc + off, PAGE_SIZE);
858
859 if (!(nr_pages % m))
860 printk("\b\b\b\b%3d%%", nr_pages / m);
861 nr_pages++;
862
863 error = snapshot_write_next(snapshot);
864 if (error <= 0)
865 goto out_finish;
866 }
867 }
868
869out_finish:
870 do_gettimeofday(&stop);
871 if (!error) {
872 printk("\b\b\b\bdone\n");
873 snapshot_write_finalize(snapshot);
874 if (!snapshot_image_loaded(snapshot))
875 error = -ENODATA;
876 } else
877 printk("\n");
878 swsusp_show_speed(&start, &stop, nr_to_read, "Read");
879
880 vfree(cmp);
881 vfree(unc);
882 for (i = 0; i < LZO_CMP_PAGES; i++)
883 free_page((unsigned long)page[i]);
884
885 return error;
886}
887
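
Note the defensive checks in the loop above: the cmp_len read from disk must be non-zero and no larger than the worst-case bound, and the decompressed length must be a whole number of pages, since it is copied back into the snapshot page by page. A userspace model of those two gates (macro values as assumed earlier):

#include <stdio.h>

#define PAGE_SIZE    4096UL
#define LZO_UNC_SIZE (32 * PAGE_SIZE)
#define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3)

/* Model of the sanity checks load_image_lzo() applies per chunk. */
static int chunk_ok(unsigned long cmp_len, unsigned long unc_len)
{
	if (!cmp_len || cmp_len > lzo1x_worst_compress(LZO_UNC_SIZE))
		return 0;  /* corrupt length header */
	if (!unc_len || unc_len > LZO_UNC_SIZE || (unc_len & (PAGE_SIZE - 1)))
		return 0;  /* output must be whole pages */
	return 1;
}

int main(void)
{
	printf("%d %d\n", chunk_ok(1000, 8 * PAGE_SIZE), /* 1: valid */
	       chunk_ok(1000, 1000));                    /* 0: not page-sized */
	return 0;
}
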
888/**
593 * swsusp_read - read the hibernation image. 889 * swsusp_read - read the hibernation image.
594 * @flags_p: flags passed by the "frozen" kernel in the image header should 890 * @flags_p: flags passed by the "frozen" kernel in the image header should
595 * be written into this memeory location 891 * be written into this memory location
596 */ 892 */
597 893
598int swsusp_read(unsigned int *flags_p) 894int swsusp_read(unsigned int *flags_p)
@@ -612,8 +908,11 @@ int swsusp_read(unsigned int *flags_p)
612 goto end; 908 goto end;
613 if (!error) 909 if (!error)
614 error = swap_read_page(&handle, header, NULL); 910 error = swap_read_page(&handle, header, NULL);
615 if (!error) 911 if (!error) {
616 error = load_image(&handle, &snapshot, header->pages - 1); 912 error = (*flags_p & SF_NOCOMPRESS_MODE) ?
913 load_image(&handle, &snapshot, header->pages - 1) :
914 load_image_lzo(&handle, &snapshot, header->pages - 1);
915 }
617 swap_reader_finish(&handle); 916 swap_reader_finish(&handle);
618end: 917end:
619 if (!error) 918 if (!error)
@@ -631,16 +930,17 @@ int swsusp_check(void)
631{ 930{
632 int error; 931 int error;
633 932
634 hib_resume_bdev = open_by_devnum(swsusp_resume_device, FMODE_READ); 933 hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
934 FMODE_READ, NULL);
635 if (!IS_ERR(hib_resume_bdev)) { 935 if (!IS_ERR(hib_resume_bdev)) {
636 set_blocksize(hib_resume_bdev, PAGE_SIZE); 936 set_blocksize(hib_resume_bdev, PAGE_SIZE);
637 memset(swsusp_header, 0, PAGE_SIZE); 937 clear_page(swsusp_header);
638 error = hib_bio_read_page(swsusp_resume_block, 938 error = hib_bio_read_page(swsusp_resume_block,
639 swsusp_header, NULL); 939 swsusp_header, NULL);
640 if (error) 940 if (error)
641 goto put; 941 goto put;
642 942
643 if (!memcmp(SWSUSP_SIG, swsusp_header->sig, 10)) { 943 if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
644 memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10); 944 memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
645 /* Reset swap signature now */ 945 /* Reset swap signature now */
646 error = hib_bio_write_page(swsusp_resume_block, 946 error = hib_bio_write_page(swsusp_resume_block,
@@ -653,13 +953,13 @@ put:
653 if (error) 953 if (error)
654 blkdev_put(hib_resume_bdev, FMODE_READ); 954 blkdev_put(hib_resume_bdev, FMODE_READ);
655 else 955 else
656 pr_debug("PM: Signature found, resuming\n"); 956 pr_debug("PM: Image signature found, resuming\n");
657 } else { 957 } else {
658 error = PTR_ERR(hib_resume_bdev); 958 error = PTR_ERR(hib_resume_bdev);
659 } 959 }
660 960
661 if (error) 961 if (error)
662 pr_debug("PM: Error %d checking image file\n", error); 962 pr_debug("PM: Image not found (code %d)\n", error);
663 963
664 return error; 964 return error;
665} 965}
diff --git a/kernel/power/user.c b/kernel/power/user.c
index e819e17877ca..42ddbc6f0de6 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -113,8 +113,10 @@ static int snapshot_open(struct inode *inode, struct file *filp)
113 if (error) 113 if (error)
114 pm_notifier_call_chain(PM_POST_RESTORE); 114 pm_notifier_call_chain(PM_POST_RESTORE);
115 } 115 }
116 if (error) 116 if (error) {
117 free_basic_memory_bitmaps();
117 atomic_inc(&snapshot_device_available); 118 atomic_inc(&snapshot_device_available);
119 }
118 data->frozen = 0; 120 data->frozen = 0;
119 data->ready = 0; 121 data->ready = 0;
120 data->platform_support = 0; 122 data->platform_support = 0;
@@ -135,9 +137,11 @@ static int snapshot_release(struct inode *inode, struct file *filp)
135 free_basic_memory_bitmaps(); 137 free_basic_memory_bitmaps();
136 data = filp->private_data; 138 data = filp->private_data;
137 free_all_swap_pages(data->swap); 139 free_all_swap_pages(data->swap);
138 if (data->frozen) 140 if (data->frozen) {
141 pm_restore_gfp_mask();
139 thaw_processes(); 142 thaw_processes();
140 pm_notifier_call_chain(data->mode == O_WRONLY ? 143 }
144 pm_notifier_call_chain(data->mode == O_RDONLY ?
141 PM_POST_HIBERNATION : PM_POST_RESTORE); 145 PM_POST_HIBERNATION : PM_POST_RESTORE);
142 atomic_inc(&snapshot_device_available); 146 atomic_inc(&snapshot_device_available);
143 147
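
The notifier fix above corrects an inverted mode test: /dev/snapshot is opened O_RDONLY by the tool reading out (saving) an image and O_WRONLY by the one feeding it back (restoring), so PM_POST_HIBERNATION belongs to the read-only case. Annotated:

pm_notifier_call_chain(data->mode == O_RDONLY ?
		       PM_POST_HIBERNATION : /* reader was saving an image */
		       PM_POST_RESTORE);     /* writer was restoring one */
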
@@ -263,6 +267,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
263 case SNAPSHOT_UNFREEZE: 267 case SNAPSHOT_UNFREEZE:
264 if (!data->frozen || data->ready) 268 if (!data->frozen || data->ready)
265 break; 269 break;
270 pm_restore_gfp_mask();
266 thaw_processes(); 271 thaw_processes();
267 usermodehelper_enable(); 272 usermodehelper_enable();
268 data->frozen = 0; 273 data->frozen = 0;
@@ -275,6 +280,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
275 error = -EPERM; 280 error = -EPERM;
276 break; 281 break;
277 } 282 }
283 pm_restore_gfp_mask();
278 error = hibernation_snapshot(data->platform_support); 284 error = hibernation_snapshot(data->platform_support);
279 if (!error) 285 if (!error)
280 error = put_user(in_suspend, (int __user *)arg); 286 error = put_user(in_suspend, (int __user *)arg);
@@ -377,6 +383,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
377 * PM_HIBERNATION_PREPARE 383 * PM_HIBERNATION_PREPARE
378 */ 384 */
379 error = suspend_devices_and_enter(PM_SUSPEND_MEM); 385 error = suspend_devices_and_enter(PM_SUSPEND_MEM);
386 data->ready = 0;
380 break; 387 break;
381 388
382 case SNAPSHOT_PLATFORM_SUPPORT: 389 case SNAPSHOT_PLATFORM_SUPPORT: