Diffstat (limited to 'kernel/power')
-rw-r--r--   kernel/power/Kconfig      65
-rw-r--r--   kernel/power/disk.c      204
-rw-r--r--   kernel/power/main.c      171
-rw-r--r--   kernel/power/power.h      90
-rw-r--r--   kernel/power/snapshot.c   31
-rw-r--r--   kernel/power/swap.c       33
-rw-r--r--   kernel/power/swsusp.c     48
-rw-r--r--   kernel/power/user.c      109
8 files changed, 506 insertions, 245 deletions
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 8e186c678149..ef9b802738a5 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -44,9 +44,30 @@ config PM_VERBOSE
     ---help---
       This option enables verbose messages from the Power Management code.
 
+config CAN_PM_TRACE
+    def_bool y
+    depends on PM_DEBUG && PM_SLEEP && EXPERIMENTAL
+
 config PM_TRACE
+    bool
+    help
+      This enables code to save the last PM event point across
+      reboot. The architecture needs to support this, x86 for
+      example does by saving things in the RTC, see below.
+
+      The architecture specific code must provide the extern
+      functions from <linux/resume-trace.h> as well as the
+      <asm/resume-trace.h> header with a TRACE_RESUME() macro.
+
+      The way the information is presented is architecture-
+      dependent, x86 will print the information during a
+      late_initcall.
+
+config PM_TRACE_RTC
     bool "Suspend/resume event tracing"
-    depends on PM_DEBUG && X86 && PM_SLEEP && EXPERIMENTAL
+    depends on CAN_PM_TRACE
+    depends on X86
+    select PM_TRACE
     default n
     ---help---
       This enables some cheesy code to save the last PM event point in the
@@ -63,7 +84,8 @@ config PM_TRACE
 
 config PM_SLEEP_SMP
     bool
-    depends on SUSPEND_SMP_POSSIBLE || HIBERNATION_SMP_POSSIBLE
+    depends on SMP
+    depends on ARCH_SUSPEND_POSSIBLE || ARCH_HIBERNATION_POSSIBLE
     depends on PM_SLEEP
     select HOTPLUG_CPU
     default y
@@ -73,46 +95,29 @@ config PM_SLEEP
     depends on SUSPEND || HIBERNATION
     default y
 
-config SUSPEND_UP_POSSIBLE
-    bool
-    depends on (X86 && !X86_VOYAGER) || PPC || ARM || BLACKFIN || MIPS \
-        || SUPERH || FRV
-    depends on !SMP
-    default y
-
-config SUSPEND_SMP_POSSIBLE
-    bool
-    depends on (X86 && !X86_VOYAGER) \
-        || (PPC && (PPC_PSERIES || PPC_PMAC)) || ARM
-    depends on SMP
-    default y
-
 config SUSPEND
     bool "Suspend to RAM and standby"
-    depends on PM
-    depends on SUSPEND_UP_POSSIBLE || SUSPEND_SMP_POSSIBLE
+    depends on PM && ARCH_SUSPEND_POSSIBLE
     default y
     ---help---
       Allow the system to enter sleep states in which main memory is
       powered and thus its contents are preserved, such as the
-      suspend-to-RAM state (i.e. the ACPI S3 state).
+      suspend-to-RAM state (e.g. the ACPI S3 state).
 
-config HIBERNATION_UP_POSSIBLE
-    bool
-    depends on X86 || PPC64_SWSUSP || PPC32
-    depends on !SMP
+config SUSPEND_FREEZER
+    bool "Enable freezer for suspend to RAM/standby" \
+        if ARCH_WANTS_FREEZER_CONTROL || BROKEN
+    depends on SUSPEND
     default y
+    help
+      This allows you to turn off the freezer for suspend. If this is
+      done, no tasks are frozen for suspend to RAM/standby.
 
-config HIBERNATION_SMP_POSSIBLE
-    bool
-    depends on (X86 && !X86_VOYAGER) || PPC64_SWSUSP
-    depends on SMP
-    default y
+      Turning OFF this setting is NOT recommended! If in doubt, say Y.
 
 config HIBERNATION
     bool "Hibernation (aka 'suspend to disk')"
-    depends on PM && SWAP
-    depends on HIBERNATION_UP_POSSIBLE || HIBERNATION_SMP_POSSIBLE
+    depends on PM && SWAP && ARCH_HIBERNATION_POSSIBLE
     ---help---
       Enable the suspend to disk (STD) functionality, which is usually
       called "hibernation" in user interfaces. STD checkpoints the
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index b138b431e271..d09da0895174 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -54,8 +54,8 @@ static struct platform_hibernation_ops *hibernation_ops;
 
 void hibernation_set_ops(struct platform_hibernation_ops *ops)
 {
-    if (ops && !(ops->start && ops->pre_snapshot && ops->finish
-        && ops->prepare && ops->enter && ops->pre_restore
+    if (ops && !(ops->begin && ops->end && ops->pre_snapshot
+        && ops->prepare && ops->finish && ops->enter && ops->pre_restore
         && ops->restore_cleanup)) {
         WARN_ON(1);
         return;
@@ -70,15 +70,55 @@ void hibernation_set_ops(struct platform_hibernation_ops *ops)
     mutex_unlock(&pm_mutex);
 }
 
+#ifdef CONFIG_PM_DEBUG
+static void hibernation_debug_sleep(void)
+{
+    printk(KERN_INFO "hibernation debug: Waiting for 5 seconds.\n");
+    mdelay(5000);
+}
+
+static int hibernation_testmode(int mode)
+{
+    if (hibernation_mode == mode) {
+        hibernation_debug_sleep();
+        return 1;
+    }
+    return 0;
+}
+
+static int hibernation_test(int level)
+{
+    if (pm_test_level == level) {
+        hibernation_debug_sleep();
+        return 1;
+    }
+    return 0;
+}
+#else /* !CONFIG_PM_DEBUG */
+static int hibernation_testmode(int mode) { return 0; }
+static int hibernation_test(int level) { return 0; }
+#endif /* !CONFIG_PM_DEBUG */
+
 /**
- * platform_start - tell the platform driver that we're starting
+ * platform_begin - tell the platform driver that we're starting
  * hibernation
  */
 
-static int platform_start(int platform_mode)
+static int platform_begin(int platform_mode)
 {
     return (platform_mode && hibernation_ops) ?
-        hibernation_ops->start() : 0;
+        hibernation_ops->begin() : 0;
+}
+
+/**
+ * platform_end - tell the platform driver that we've entered the
+ * working state
+ */
+
+static void platform_end(int platform_mode)
+{
+    if (platform_mode && hibernation_ops)
+        hibernation_ops->end();
 }
 
 /**
@@ -162,19 +202,25 @@ int create_image(int platform_mode)
      */
     error = device_power_down(PMSG_FREEZE);
     if (error) {
-        printk(KERN_ERR "Some devices failed to power down, "
-            KERN_ERR "aborting suspend\n");
+        printk(KERN_ERR "PM: Some devices failed to power down, "
+            "aborting hibernation\n");
         goto Enable_irqs;
     }
 
+    if (hibernation_test(TEST_CORE))
+        goto Power_up;
+
+    in_suspend = 1;
     save_processor_state();
     error = swsusp_arch_suspend();
     if (error)
-        printk(KERN_ERR "Error %d while creating the image\n", error);
+        printk(KERN_ERR "PM: Error %d creating hibernation image\n",
+            error);
     /* Restore control flow magically appears here */
     restore_processor_state();
     if (!in_suspend)
         platform_leave(platform_mode);
+ Power_up:
     /* NOTE: device_power_up() is just a resume() for devices
      * that suspended with irqs off ... no overall powerup.
      */
@@ -202,36 +248,90 @@ int hibernation_snapshot(int platform_mode)
     if (error)
         return error;
 
-    error = platform_start(platform_mode);
+    error = platform_begin(platform_mode);
     if (error)
-        return error;
+        goto Close;
 
     suspend_console();
     error = device_suspend(PMSG_FREEZE);
     if (error)
         goto Resume_console;
 
-    error = platform_pre_snapshot(platform_mode);
-    if (error)
+    if (hibernation_test(TEST_DEVICES))
         goto Resume_devices;
 
+    error = platform_pre_snapshot(platform_mode);
+    if (error || hibernation_test(TEST_PLATFORM))
+        goto Finish;
+
     error = disable_nonboot_cpus();
     if (!error) {
-        if (hibernation_mode != HIBERNATION_TEST) {
-            in_suspend = 1;
-            error = create_image(platform_mode);
-            /* Control returns here after successful restore */
-        } else {
-            printk("swsusp debug: Waiting for 5 seconds.\n");
-            mdelay(5000);
-        }
+        if (hibernation_test(TEST_CPUS))
+            goto Enable_cpus;
+
+        if (hibernation_testmode(HIBERNATION_TEST))
+            goto Enable_cpus;
+
+        error = create_image(platform_mode);
+        /* Control returns here after successful restore */
     }
+ Enable_cpus:
     enable_nonboot_cpus();
- Resume_devices:
+ Finish:
     platform_finish(platform_mode);
+ Resume_devices:
     device_resume();
  Resume_console:
     resume_console();
+ Close:
+    platform_end(platform_mode);
+    return error;
+}
+
+/**
+ * resume_target_kernel - prepare devices that need to be suspended with
+ * interrupts off, restore the contents of highmem that have not been
+ * restored yet from the image and run the low level code that will restore
+ * the remaining contents of memory and switch to the just restored target
+ * kernel.
+ */
+
+static int resume_target_kernel(void)
+{
+    int error;
+
+    local_irq_disable();
+    error = device_power_down(PMSG_PRETHAW);
+    if (error) {
+        printk(KERN_ERR "PM: Some devices failed to power down, "
+            "aborting resume\n");
+        goto Enable_irqs;
+    }
+    /* We'll ignore saved state, but this gets preempt count (etc) right */
+    save_processor_state();
+    error = restore_highmem();
+    if (!error) {
+        error = swsusp_arch_resume();
+        /*
+         * The code below is only ever reached in case of a failure.
+         * Otherwise execution continues at place where
+         * swsusp_arch_suspend() was called
+         */
+        BUG_ON(!error);
+        /* This call to restore_highmem() undos the previous one */
+        restore_highmem();
+    }
+    /*
+     * The only reason why swsusp_arch_resume() can fail is memory being
+     * very tight, so we have to free it as soon as we can to avoid
+     * subsequent failures
+     */
+    swsusp_free();
+    restore_processor_state();
+    touch_softlockup_watchdog();
+    device_power_up();
+ Enable_irqs:
+    local_irq_enable();
     return error;
 }
 
@@ -258,7 +358,7 @@ int hibernation_restore(int platform_mode)
     if (!error) {
         error = disable_nonboot_cpus();
         if (!error)
-            error = swsusp_resume();
+            error = resume_target_kernel();
         enable_nonboot_cpus();
     }
     platform_restore_cleanup(platform_mode);
@@ -286,9 +386,9 @@ int hibernation_platform_enter(void)
      * hibernation_ops->finish() before saving the image, so we should let
      * the firmware know that we're going to enter the sleep state after all
      */
-    error = hibernation_ops->start();
+    error = hibernation_ops->begin();
     if (error)
-        return error;
+        goto Close;
 
     suspend_console();
     error = device_suspend(PMSG_SUSPEND);
@@ -322,6 +422,8 @@ int hibernation_platform_enter(void)
     device_resume();
  Resume_console:
     resume_console();
+ Close:
+    hibernation_ops->end();
     return error;
 }
 
@@ -352,24 +454,17 @@ static void power_down(void)
      * Valid image is on the disk, if we continue we risk serious data
      * corruption after resume.
      */
-    printk(KERN_CRIT "Please power me down manually\n");
+    printk(KERN_CRIT "PM: Please power down manually\n");
     while(1);
 }
 
-static void unprepare_processes(void)
-{
-    thaw_processes();
-    pm_restore_console();
-}
-
 static int prepare_processes(void)
 {
     int error = 0;
 
-    pm_prepare_console();
     if (freeze_processes()) {
         error = -EBUSY;
-        unprepare_processes();
+        thaw_processes();
     }
     return error;
 }
@@ -389,6 +484,7 @@ int hibernate(void)
         goto Unlock;
     }
 
+    pm_prepare_console();
     error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE);
     if (error)
         goto Exit;
@@ -398,7 +494,7 @@ int hibernate(void)
     if (error)
         goto Exit;
 
-    printk("Syncing filesystems ... ");
+    printk(KERN_INFO "PM: Syncing filesystems ... ");
     sys_sync();
     printk("done.\n");
 
@@ -406,11 +502,12 @@ int hibernate(void)
     if (error)
         goto Finish;
 
-    if (hibernation_mode == HIBERNATION_TESTPROC) {
-        printk("swsusp debug: Waiting for 5 seconds.\n");
-        mdelay(5000);
+    if (hibernation_test(TEST_FREEZER))
         goto Thaw;
-    }
+
+    if (hibernation_testmode(HIBERNATION_TESTPROC))
+        goto Thaw;
+
     error = hibernation_snapshot(hibernation_mode == HIBERNATION_PLATFORM);
     if (in_suspend && !error) {
         unsigned int flags = 0;
@@ -427,11 +524,12 @@ int hibernate(void)
         swsusp_free();
     }
  Thaw:
-    unprepare_processes();
+    thaw_processes();
  Finish:
     free_basic_memory_bitmaps();
  Exit:
     pm_notifier_call_chain(PM_POST_HIBERNATION);
+    pm_restore_console();
     atomic_inc(&snapshot_device_available);
  Unlock:
     mutex_unlock(&pm_mutex);
@@ -473,22 +571,23 @@ static int software_resume(void)
             return -ENOENT;
         }
         swsusp_resume_device = name_to_dev_t(resume_file);
-        pr_debug("swsusp: Resume From Partition %s\n", resume_file);
+        pr_debug("PM: Resume from partition %s\n", resume_file);
     } else {
-        pr_debug("swsusp: Resume From Partition %d:%d\n",
-            MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device));
+        pr_debug("PM: Resume from partition %d:%d\n",
+            MAJOR(swsusp_resume_device),
+            MINOR(swsusp_resume_device));
     }
 
     if (noresume) {
         /**
-         * FIXME: If noresume is specified, we need to find the partition
-         * and reset it back to normal swap space.
+         * FIXME: If noresume is specified, we need to find the
+         * partition and reset it back to normal swap space.
          */
         mutex_unlock(&pm_mutex);
         return 0;
     }
 
-    pr_debug("PM: Checking swsusp image.\n");
+    pr_debug("PM: Checking hibernation image.\n");
     error = swsusp_check();
     if (error)
         goto Unlock;
@@ -499,6 +598,11 @@ static int software_resume(void)
         goto Unlock;
     }
 
+    pm_prepare_console();
+    error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
+    if (error)
+        goto Finish;
+
     error = create_basic_memory_bitmaps();
     if (error)
         goto Finish;
@@ -510,7 +614,7 @@ static int software_resume(void)
         goto Done;
     }
 
-    pr_debug("PM: Reading swsusp image.\n");
+    pr_debug("PM: Reading hibernation image.\n");
 
     error = swsusp_read(&flags);
     if (!error)
@@ -518,10 +622,12 @@ static int software_resume(void)
 
     printk(KERN_ERR "PM: Restore failed, recovering.\n");
     swsusp_free();
-    unprepare_processes();
+    thaw_processes();
  Done:
     free_basic_memory_bitmaps();
  Finish:
+    pm_notifier_call_chain(PM_POST_RESTORE);
+    pm_restore_console();
     atomic_inc(&snapshot_device_available);
     /* For success case, the suspend path will release the lock */
  Unlock:
@@ -636,7 +742,7 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
         error = -EINVAL;
 
     if (!error)
-        pr_debug("PM: suspend-to-disk mode set to '%s'\n",
+        pr_debug("PM: Hibernation mode set to '%s'\n",
             hibernation_modes[mode]);
     mutex_unlock(&pm_mutex);
     return error ? error : n;
@@ -668,7 +774,7 @@ static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr,
     mutex_lock(&pm_mutex);
     swsusp_resume_device = res;
     mutex_unlock(&pm_mutex);
-    printk("Attempting manual resume\n");
+    printk(KERN_INFO "PM: Starting manual resume from disk\n");
     noresume = 0;
     software_resume();
     ret = n;
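
Note (illustration only, not part of the patch): after this change, hibernation_set_ops() rejects any platform_hibernation_ops that does not provide begin, end, pre_snapshot, prepare, finish, enter, pre_restore and restore_cleanup. The sketch below shows how a hypothetical platform driver could satisfy the reworked interface; the callback signatures are assumed to match <linux/suspend.h> of this kernel series, and the myplat_* stubs are placeholders rather than code from this patch.

#include <linux/init.h>
#include <linux/suspend.h>

/* Trivial placeholders standing in for real firmware hooks (assumed). */
static int myplat_begin(void)            { return 0; }
static void myplat_end(void)             { }
static int myplat_pre_snapshot(void)     { return 0; }
static void myplat_finish(void)          { }
static int myplat_prepare(void)          { return 0; }
static int myplat_enter(void)            { return 0; }
static void myplat_leave(void)           { }
static int myplat_pre_restore(void)      { return 0; }
static void myplat_restore_cleanup(void) { }

static struct platform_hibernation_ops myplat_hibernation_ops = {
    .begin           = myplat_begin,
    .end             = myplat_end,
    .pre_snapshot    = myplat_pre_snapshot,
    .finish          = myplat_finish,
    .prepare         = myplat_prepare,
    .enter           = myplat_enter,
    .leave           = myplat_leave,
    .pre_restore     = myplat_pre_restore,
    .restore_cleanup = myplat_restore_cleanup,
};

static int __init myplat_pm_init(void)
{
    /* Register the full callback set; incomplete ops are now refused. */
    hibernation_set_ops(&myplat_hibernation_ops);
    return 0;
}
late_initcall(myplat_pm_init);
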
diff --git a/kernel/power/main.c b/kernel/power/main.c
index efc08360e627..6a6d5eb3524e 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -24,13 +24,112 @@
 
 #include "power.h"
 
-BLOCKING_NOTIFIER_HEAD(pm_chain_head);
-
 DEFINE_MUTEX(pm_mutex);
 
 unsigned int pm_flags;
 EXPORT_SYMBOL(pm_flags);
 
+#ifdef CONFIG_PM_SLEEP
+
+/* Routines for PM-transition notifications */
+
+static BLOCKING_NOTIFIER_HEAD(pm_chain_head);
+
+int register_pm_notifier(struct notifier_block *nb)
+{
+    return blocking_notifier_chain_register(&pm_chain_head, nb);
+}
+EXPORT_SYMBOL_GPL(register_pm_notifier);
+
+int unregister_pm_notifier(struct notifier_block *nb)
+{
+    return blocking_notifier_chain_unregister(&pm_chain_head, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_pm_notifier);
+
+int pm_notifier_call_chain(unsigned long val)
+{
+    return (blocking_notifier_call_chain(&pm_chain_head, val, NULL)
+            == NOTIFY_BAD) ? -EINVAL : 0;
+}
+
+#ifdef CONFIG_PM_DEBUG
+int pm_test_level = TEST_NONE;
+
+static int suspend_test(int level)
+{
+    if (pm_test_level == level) {
+        printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
+        mdelay(5000);
+        return 1;
+    }
+    return 0;
+}
+
+static const char * const pm_tests[__TEST_AFTER_LAST] = {
+    [TEST_NONE] = "none",
+    [TEST_CORE] = "core",
+    [TEST_CPUS] = "processors",
+    [TEST_PLATFORM] = "platform",
+    [TEST_DEVICES] = "devices",
+    [TEST_FREEZER] = "freezer",
+};
+
+static ssize_t pm_test_show(struct kobject *kobj, struct kobj_attribute *attr,
+                char *buf)
+{
+    char *s = buf;
+    int level;
+
+    for (level = TEST_FIRST; level <= TEST_MAX; level++)
+        if (pm_tests[level]) {
+            if (level == pm_test_level)
+                s += sprintf(s, "[%s] ", pm_tests[level]);
+            else
+                s += sprintf(s, "%s ", pm_tests[level]);
+        }
+
+    if (s != buf)
+        /* convert the last space to a newline */
+        *(s-1) = '\n';
+
+    return (s - buf);
+}
+
+static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
+                const char *buf, size_t n)
+{
+    const char * const *s;
+    int level;
+    char *p;
+    int len;
+    int error = -EINVAL;
+
+    p = memchr(buf, '\n', n);
+    len = p ? p - buf : n;
+
+    mutex_lock(&pm_mutex);
+
+    level = TEST_FIRST;
+    for (s = &pm_tests[level]; level <= TEST_MAX; s++, level++)
+        if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) {
+            pm_test_level = level;
+            error = 0;
+            break;
+        }
+
+    mutex_unlock(&pm_mutex);
+
+    return error ? error : n;
+}
+
+power_attr(pm_test);
+#else /* !CONFIG_PM_DEBUG */
+static inline int suspend_test(int level) { return 0; }
+#endif /* !CONFIG_PM_DEBUG */
+
+#endif /* CONFIG_PM_SLEEP */
+
 #ifdef CONFIG_SUSPEND
 
 /* This is just an arbitrary number */
@@ -76,13 +175,13 @@ static int suspend_prepare(void)
     if (!suspend_ops || !suspend_ops->enter)
         return -EPERM;
 
+    pm_prepare_console();
+
     error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
     if (error)
         goto Finish;
 
-    pm_prepare_console();
-
-    if (freeze_processes()) {
+    if (suspend_freeze_processes()) {
         error = -EAGAIN;
         goto Thaw;
     }
@@ -100,10 +199,10 @@ static int suspend_prepare(void)
     return 0;
 
  Thaw:
-    thaw_processes();
-    pm_restore_console();
+    suspend_thaw_processes();
  Finish:
     pm_notifier_call_chain(PM_POST_SUSPEND);
+    pm_restore_console();
     return error;
 }
 
@@ -133,10 +232,13 @@ static int suspend_enter(suspend_state_t state)
     BUG_ON(!irqs_disabled());
 
     if ((error = device_power_down(PMSG_SUSPEND))) {
-        printk(KERN_ERR "Some devices failed to power down\n");
+        printk(KERN_ERR "PM: Some devices failed to power down\n");
         goto Done;
     }
-    error = suspend_ops->enter(state);
+
+    if (!suspend_test(TEST_CORE))
+        error = suspend_ops->enter(state);
+
     device_power_up();
  Done:
     arch_suspend_enable_irqs();
@@ -145,8 +247,8 @@ static int suspend_enter(suspend_state_t state)
 }
 
 /**
- *    suspend_devices_and_enter - suspend devices and enter the desired system sleep
- *    state.
+ *    suspend_devices_and_enter - suspend devices and enter the desired system
+ *    sleep state.
  *    @state:      state to enter
  */
 int suspend_devices_and_enter(suspend_state_t state)
@@ -156,33 +258,45 @@ int suspend_devices_and_enter(suspend_state_t state)
     if (!suspend_ops)
         return -ENOSYS;
 
-    if (suspend_ops->set_target) {
-        error = suspend_ops->set_target(state);
+    if (suspend_ops->begin) {
+        error = suspend_ops->begin(state);
         if (error)
-            return error;
+            goto Close;
     }
     suspend_console();
     error = device_suspend(PMSG_SUSPEND);
     if (error) {
-        printk(KERN_ERR "Some devices failed to suspend\n");
+        printk(KERN_ERR "PM: Some devices failed to suspend\n");
         goto Resume_console;
     }
+
+    if (suspend_test(TEST_DEVICES))
+        goto Resume_devices;
+
     if (suspend_ops->prepare) {
         error = suspend_ops->prepare();
         if (error)
             goto Resume_devices;
     }
+
+    if (suspend_test(TEST_PLATFORM))
+        goto Finish;
+
     error = disable_nonboot_cpus();
-    if (!error)
+    if (!error && !suspend_test(TEST_CPUS))
         suspend_enter(state);
 
     enable_nonboot_cpus();
+ Finish:
     if (suspend_ops->finish)
         suspend_ops->finish();
  Resume_devices:
     device_resume();
  Resume_console:
     resume_console();
+ Close:
+    if (suspend_ops->end)
+        suspend_ops->end();
     return error;
 }
 
@@ -194,9 +308,9 @@ int suspend_devices_and_enter(suspend_state_t state)
  */
 static void suspend_finish(void)
 {
-    thaw_processes();
-    pm_restore_console();
+    suspend_thaw_processes();
     pm_notifier_call_chain(PM_POST_SUSPEND);
+    pm_restore_console();
 }
 
 
@@ -238,17 +352,22 @@ static int enter_state(suspend_state_t state)
     if (!mutex_trylock(&pm_mutex))
         return -EBUSY;
 
-    printk("Syncing filesystems ... ");
+    printk(KERN_INFO "PM: Syncing filesystems ... ");
     sys_sync();
     printk("done.\n");
 
     pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
-    if ((error = suspend_prepare()))
+    error = suspend_prepare();
+    if (error)
         goto Unlock;
 
+    if (suspend_test(TEST_FREEZER))
+        goto Finish;
+
     pr_debug("PM: Entering %s sleep\n", pm_states[state]);
     error = suspend_devices_and_enter(state);
 
+ Finish:
     pr_debug("PM: Finishing wakeup.\n");
     suspend_finish();
  Unlock:
@@ -369,18 +488,18 @@ pm_trace_store(struct kobject *kobj, struct kobj_attribute *attr,
 }
 
 power_attr(pm_trace);
+#endif /* CONFIG_PM_TRACE */
 
 static struct attribute * g[] = {
     &state_attr.attr,
+#ifdef CONFIG_PM_TRACE
     &pm_trace_attr.attr,
+#endif
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_PM_DEBUG)
+    &pm_test_attr.attr,
+#endif
     NULL,
 };
-#else
-static struct attribute * g[] = {
-    &state_attr.attr,
-    NULL,
-};
-#endif /* CONFIG_PM_TRACE */
 
 static struct attribute_group attr_group = {
     .attrs = g,
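
Note (illustration only, not part of the patch): main.c now owns the PM notifier chain and exports register_pm_notifier()/unregister_pm_notifier(), and with CONFIG_PM_DEBUG it also adds a /sys/power/pm_test attribute that accepts one of none, core, processors, platform, devices or freezer. A minimal sketch of a driver hooking the notifier chain follows; it assumes only the API visible in this diff plus the standard notifier_block interface, and mydrv_* names are hypothetical.

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/suspend.h>

static int mydrv_pm_event(struct notifier_block *nb, unsigned long event,
                          void *unused)
{
    switch (event) {
    case PM_HIBERNATION_PREPARE:
    case PM_SUSPEND_PREPARE:
        /* quiesce the (hypothetical) device before tasks are frozen */
        return NOTIFY_OK;
    case PM_POST_HIBERNATION:
    case PM_POST_SUSPEND:
        /* undo whatever the PREPARE branch did */
        return NOTIFY_OK;
    default:
        return NOTIFY_DONE;
    }
}

static struct notifier_block mydrv_pm_nb = {
    .notifier_call = mydrv_pm_event,
};

static int __init mydrv_init(void)
{
    return register_pm_notifier(&mydrv_pm_nb);
}
late_initcall(mydrv_init);
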
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 2093c3a9a994..700f44ec8406 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -1,5 +1,7 @@
 #include <linux/suspend.h>
+#include <linux/suspend_ioctls.h>
 #include <linux/utsname.h>
+#include <linux/freezer.h>
 
 struct swsusp_info {
     struct new_utsname    uts;
@@ -128,42 +130,12 @@ struct snapshot_handle {
 #define data_of(handle)    ((handle).buffer + (handle).buf_offset)
 
 extern unsigned int snapshot_additional_pages(struct zone *zone);
+extern unsigned long snapshot_get_image_size(void);
 extern int snapshot_read_next(struct snapshot_handle *handle, size_t count);
 extern int snapshot_write_next(struct snapshot_handle *handle, size_t count);
 extern void snapshot_write_finalize(struct snapshot_handle *handle);
 extern int snapshot_image_loaded(struct snapshot_handle *handle);
 
-/*
- * This structure is used to pass the values needed for the identification
- * of the resume swap area from a user space to the kernel via the
- * SNAPSHOT_SET_SWAP_AREA ioctl
- */
-struct resume_swap_area {
-    loff_t offset;
-    u_int32_t dev;
-} __attribute__((packed));
-
-#define SNAPSHOT_IOC_MAGIC    '3'
-#define SNAPSHOT_FREEZE            _IO(SNAPSHOT_IOC_MAGIC, 1)
-#define SNAPSHOT_UNFREEZE          _IO(SNAPSHOT_IOC_MAGIC, 2)
-#define SNAPSHOT_ATOMIC_SNAPSHOT   _IOW(SNAPSHOT_IOC_MAGIC, 3, void *)
-#define SNAPSHOT_ATOMIC_RESTORE    _IO(SNAPSHOT_IOC_MAGIC, 4)
-#define SNAPSHOT_FREE              _IO(SNAPSHOT_IOC_MAGIC, 5)
-#define SNAPSHOT_SET_IMAGE_SIZE    _IOW(SNAPSHOT_IOC_MAGIC, 6, unsigned long)
-#define SNAPSHOT_AVAIL_SWAP        _IOR(SNAPSHOT_IOC_MAGIC, 7, void *)
-#define SNAPSHOT_GET_SWAP_PAGE     _IOR(SNAPSHOT_IOC_MAGIC, 8, void *)
-#define SNAPSHOT_FREE_SWAP_PAGES   _IO(SNAPSHOT_IOC_MAGIC, 9)
-#define SNAPSHOT_SET_SWAP_FILE     _IOW(SNAPSHOT_IOC_MAGIC, 10, unsigned int)
-#define SNAPSHOT_S2RAM             _IO(SNAPSHOT_IOC_MAGIC, 11)
-#define SNAPSHOT_PMOPS             _IOW(SNAPSHOT_IOC_MAGIC, 12, unsigned int)
-#define SNAPSHOT_SET_SWAP_AREA     _IOW(SNAPSHOT_IOC_MAGIC, 13, \
-                    struct resume_swap_area)
-#define SNAPSHOT_IOC_MAXNR    13
-
-#define PMOPS_PREPARE    1
-#define PMOPS_ENTER    2
-#define PMOPS_FINISH    3
-
 /* If unset, the snapshot device cannot be open. */
 extern atomic_t snapshot_device_available;
 
@@ -181,7 +153,6 @@ extern int swsusp_swap_in_use(void);
 extern int swsusp_check(void);
 extern int swsusp_shrink_memory(void);
 extern void swsusp_free(void);
-extern int swsusp_resume(void);
 extern int swsusp_read(unsigned int *flags_p);
 extern int swsusp_write(unsigned int flags);
 extern void swsusp_close(void);
@@ -201,11 +172,56 @@ static inline int suspend_devices_and_enter(suspend_state_t state)
 }
 #endif /* !CONFIG_SUSPEND */
 
-/* kernel/power/common.c */
-extern struct blocking_notifier_head pm_chain_head;
+#ifdef CONFIG_PM_SLEEP
+/* kernel/power/main.c */
+extern int pm_notifier_call_chain(unsigned long val);
+#endif
+
+#ifdef CONFIG_HIGHMEM
+unsigned int count_highmem_pages(void);
+int restore_highmem(void);
+#else
+static inline unsigned int count_highmem_pages(void) { return 0; }
+static inline int restore_highmem(void) { return 0; }
+#endif
+
+/*
+ * Suspend test levels
+ */
+enum {
+    /* keep first */
+    TEST_NONE,
+    TEST_CORE,
+    TEST_CPUS,
+    TEST_PLATFORM,
+    TEST_DEVICES,
+    TEST_FREEZER,
+    /* keep last */
+    __TEST_AFTER_LAST
+};
+
+#define TEST_FIRST    TEST_NONE
+#define TEST_MAX    (__TEST_AFTER_LAST - 1)
+
+extern int pm_test_level;
+
+#ifdef CONFIG_SUSPEND_FREEZER
+static inline int suspend_freeze_processes(void)
+{
+    return freeze_processes();
+}
 
-static inline int pm_notifier_call_chain(unsigned long val)
+static inline void suspend_thaw_processes(void)
 {
-    return (blocking_notifier_call_chain(&pm_chain_head, val, NULL)
-            == NOTIFY_BAD) ? -EINVAL : 0;
+    thaw_processes();
 }
+#else
+static inline int suspend_freeze_processes(void)
+{
+    return 0;
+}
+
+static inline void suspend_thaw_processes(void)
+{
+}
+#endif
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 78039b477d2b..f6a5df934f8d 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -635,7 +635,7 @@ __register_nosave_region(unsigned long start_pfn, unsigned long end_pfn,
     region->end_pfn = end_pfn;
     list_add_tail(&region->list, &nosave_regions);
  Report:
-    printk("swsusp: Registered nosave memory region: %016lx - %016lx\n",
+    printk(KERN_INFO "PM: Registered nosave memory: %016lx - %016lx\n",
         start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
 }
 
@@ -704,7 +704,7 @@ static void mark_nosave_pages(struct memory_bitmap *bm)
     list_for_each_entry(region, &nosave_regions, list) {
         unsigned long pfn;
 
-        printk("swsusp: Marking nosave pages: %016lx - %016lx\n",
+        pr_debug("PM: Marking nosave pages: %016lx - %016lx\n",
                 region->start_pfn << PAGE_SHIFT,
                 region->end_pfn << PAGE_SHIFT);
 
@@ -749,7 +749,7 @@ int create_basic_memory_bitmaps(void)
     free_pages_map = bm2;
     mark_nosave_pages(forbidden_pages_map);
 
-    printk("swsusp: Basic memory bitmaps created\n");
+    pr_debug("PM: Basic memory bitmaps created\n");
 
     return 0;
 
@@ -784,7 +784,7 @@ void free_basic_memory_bitmaps(void)
     memory_bm_free(bm2, PG_UNSAFE_CLEAR);
     kfree(bm2);
 
-    printk("swsusp: Basic memory bitmaps freed\n");
+    pr_debug("PM: Basic memory bitmaps freed\n");
 }
 
 /**
@@ -872,7 +872,6 @@ unsigned int count_highmem_pages(void)
 }
 #else
 static inline void *saveable_highmem_page(unsigned long pfn) { return NULL; }
-static inline unsigned int count_highmem_pages(void) { return 0; }
 #endif /* CONFIG_HIGHMEM */
 
 /**
@@ -1089,7 +1088,7 @@ static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
     }
 
     nr_pages += count_pages_for_highmem(nr_highmem);
-    pr_debug("swsusp: Normal pages needed: %u + %u + %u, available pages: %u\n",
+    pr_debug("PM: Normal pages needed: %u + %u + %u, available pages: %u\n",
         nr_pages, PAGES_FOR_IO, meta, free);
 
     return free > nr_pages + PAGES_FOR_IO + meta;
@@ -1202,20 +1201,20 @@ asmlinkage int swsusp_save(void)
 {
     unsigned int nr_pages, nr_highmem;
 
-    printk("swsusp: critical section: \n");
+    printk(KERN_INFO "PM: Creating hibernation image: \n");
 
     drain_local_pages();
     nr_pages = count_data_pages();
     nr_highmem = count_highmem_pages();
-    printk("swsusp: Need to copy %u pages\n", nr_pages + nr_highmem);
+    printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);
 
     if (!enough_free_mem(nr_pages, nr_highmem)) {
-        printk(KERN_ERR "swsusp: Not enough free memory\n");
+        printk(KERN_ERR "PM: Not enough free memory\n");
         return -ENOMEM;
     }
 
     if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) {
-        printk(KERN_ERR "swsusp: Memory allocation failed\n");
+        printk(KERN_ERR "PM: Memory allocation failed\n");
         return -ENOMEM;
     }
 
@@ -1235,7 +1234,8 @@ asmlinkage int swsusp_save(void)
     nr_copy_pages = nr_pages;
     nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
 
-    printk("swsusp: critical section: done (%d pages copied)\n", nr_pages);
+    printk(KERN_INFO "PM: Hibernation image created (%d pages copied)\n",
+        nr_pages);
 
     return 0;
 }
@@ -1264,12 +1264,17 @@ static char *check_image_kernel(struct swsusp_info *info)
 }
 #endif /* CONFIG_ARCH_HIBERNATION_HEADER */
 
+unsigned long snapshot_get_image_size(void)
+{
+    return nr_copy_pages + nr_meta_pages + 1;
+}
+
 static int init_header(struct swsusp_info *info)
 {
     memset(info, 0, sizeof(struct swsusp_info));
     info->num_physpages = num_physpages;
     info->image_pages = nr_copy_pages;
-    info->pages = nr_copy_pages + nr_meta_pages + 1;
+    info->pages = snapshot_get_image_size();
     info->size = info->pages;
     info->size <<= PAGE_SHIFT;
     return init_header_complete(info);
@@ -1429,7 +1434,7 @@ static int check_header(struct swsusp_info *info)
     if (!reason && info->num_physpages != num_physpages)
         reason = "memory size";
     if (reason) {
-        printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason);
+        printk(KERN_ERR "PM: Image mismatch: %s\n", reason);
         return -EPERM;
     }
     return 0;
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 917aba100575..a0abf9a463f9 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -28,8 +28,6 @@
 
 #include "power.h"
 
-extern char resume_file[];
-
 #define SWSUSP_SIG    "S1SUSPEND"
 
 struct swsusp_header {
@@ -73,7 +71,8 @@ static int submit(int rw, pgoff_t page_off, struct page *page,
     bio->bi_end_io = end_swap_bio_read;
 
     if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
-        printk("swsusp: ERROR: adding page to bio at %ld\n", page_off);
+        printk(KERN_ERR "PM: Adding page to bio failed at %ld\n",
+            page_off);
         bio_put(bio);
         return -EFAULT;
     }
@@ -153,7 +152,7 @@ static int mark_swapfiles(sector_t start, unsigned int flags)
         error = bio_write_page(swsusp_resume_block,
                     swsusp_header, NULL);
     } else {
-        printk(KERN_ERR "swsusp: Swap header not found!\n");
+        printk(KERN_ERR "PM: Swap header not found!\n");
         error = -ENODEV;
     }
     return error;
@@ -325,7 +324,8 @@ static int save_image(struct swap_map_handle *handle,
     struct timeval start;
     struct timeval stop;
 
-    printk("Saving image data pages (%u pages) ... ", nr_to_write);
+    printk(KERN_INFO "PM: Saving image data pages (%u pages) ... ",
+        nr_to_write);
     m = nr_to_write / 100;
     if (!m)
         m = 1;
@@ -365,7 +365,7 @@ static int enough_swap(unsigned int nr_pages)
 {
     unsigned int free_swap = count_swap_pages(root_swap, 1);
 
-    pr_debug("swsusp: free swap pages: %u\n", free_swap);
+    pr_debug("PM: Free swap pages: %u\n", free_swap);
     return free_swap > nr_pages + PAGES_FOR_IO;
 }
 
@@ -388,7 +388,7 @@ int swsusp_write(unsigned int flags)
 
     error = swsusp_swap_check();
     if (error) {
-        printk(KERN_ERR "swsusp: Cannot find swap device, try "
+        printk(KERN_ERR "PM: Cannot find swap device, try "
             "swapon -a.\n");
         return error;
     }
@@ -402,7 +402,7 @@ int swsusp_write(unsigned int flags)
     }
     header = (struct swsusp_info *)data_of(snapshot);
     if (!enough_swap(header->pages)) {
-        printk(KERN_ERR "swsusp: Not enough free swap\n");
+        printk(KERN_ERR "PM: Not enough free swap\n");
         error = -ENOSPC;
         goto out;
     }
@@ -417,7 +417,7 @@ int swsusp_write(unsigned int flags)
 
     if (!error) {
         flush_swap_writer(&handle);
-        printk("S");
+        printk(KERN_INFO "PM: S");
         error = mark_swapfiles(start, flags);
         printk("|\n");
     }
@@ -507,7 +507,8 @@ static int load_image(struct swap_map_handle *handle,
     int err2;
     unsigned nr_pages;
 
-    printk("Loading image data pages (%u pages) ... ", nr_to_read);
+    printk(KERN_INFO "PM: Loading image data pages (%u pages) ... ",
+        nr_to_read);
     m = nr_to_read / 100;
     if (!m)
         m = 1;
@@ -558,7 +559,7 @@ int swsusp_read(unsigned int *flags_p)
 
     *flags_p = swsusp_header->flags;
     if (IS_ERR(resume_bdev)) {
-        pr_debug("swsusp: block device not initialised\n");
+        pr_debug("PM: Image device not initialised\n");
         return PTR_ERR(resume_bdev);
     }
 
@@ -577,9 +578,9 @@ int swsusp_read(unsigned int *flags_p)
     blkdev_put(resume_bdev);
 
     if (!error)
-        pr_debug("swsusp: Reading resume file was successful\n");
+        pr_debug("PM: Image successfully loaded\n");
     else
-        pr_debug("swsusp: Error %d resuming\n", error);
+        pr_debug("PM: Error %d resuming\n", error);
     return error;
 }
 
@@ -611,13 +612,13 @@ int swsusp_check(void)
         if (error)
             blkdev_put(resume_bdev);
         else
-            pr_debug("swsusp: Signature found, resuming\n");
+            pr_debug("PM: Signature found, resuming\n");
     } else {
         error = PTR_ERR(resume_bdev);
     }
 
     if (error)
-        pr_debug("swsusp: Error %d check for resume file\n", error);
+        pr_debug("PM: Error %d checking image file\n", error);
 
     return error;
 }
@@ -629,7 +630,7 @@ int swsusp_check(void)
 void swsusp_close(void)
 {
     if (IS_ERR(resume_bdev)) {
-        pr_debug("swsusp: block device not initialised\n");
+        pr_debug("PM: Image device not initialised\n");
         return;
     }
 
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index e1722d3155f1..023ff2a31d89 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -64,14 +64,6 @@ unsigned long image_size = 500 * 1024 * 1024;
 
 int in_suspend __nosavedata = 0;
 
-#ifdef CONFIG_HIGHMEM
-unsigned int count_highmem_pages(void);
-int restore_highmem(void);
-#else
-static inline int restore_highmem(void) { return 0; }
-static inline unsigned int count_highmem_pages(void) { return 0; }
-#endif
-
 /**
  *    The following functions are used for tracing the allocated
  *    swap pages, so that they can be freed in case of an error.
@@ -196,7 +188,8 @@ void swsusp_show_speed(struct timeval *start, struct timeval *stop,
         centisecs = 1;    /* avoid div-by-zero */
     k = nr_pages * (PAGE_SIZE / 1024);
     kps = (k * 100) / centisecs;
-    printk("%s %d kbytes in %d.%02d seconds (%d.%02d MB/s)\n", msg, k,
+    printk(KERN_INFO "PM: %s %d kbytes in %d.%02d seconds (%d.%02d MB/s)\n",
+            msg, k,
             centisecs / 100, centisecs % 100,
             kps / 1000, (kps % 1000) / 10);
 }
@@ -227,7 +220,7 @@ int swsusp_shrink_memory(void)
     char *p = "-\\|/";
     struct timeval start, stop;
 
-    printk("Shrinking memory... ");
+    printk(KERN_INFO "PM: Shrinking memory... ");
     do_gettimeofday(&start);
     do {
         long size, highmem_size;
@@ -269,38 +262,3 @@ int swsusp_shrink_memory(void)
 
     return 0;
 }
-
-int swsusp_resume(void)
-{
-    int error;
-
-    local_irq_disable();
-    /* NOTE:  device_power_down() is just a suspend() with irqs off;
-     * it has no special "power things down" semantics
-     */
-    if (device_power_down(PMSG_PRETHAW))
-        printk(KERN_ERR "Some devices failed to power down, very bad\n");
-    /* We'll ignore saved state, but this gets preempt count (etc) right */
-    save_processor_state();
-    error = restore_highmem();
-    if (!error) {
-        error = swsusp_arch_resume();
-        /* The code below is only ever reached in case of a failure.
-         * Otherwise execution continues at place where
-         * swsusp_arch_suspend() was called
-         */
-        BUG_ON(!error);
-        /* This call to restore_highmem() undos the previous one */
-        restore_highmem();
-    }
-    /* The only reason why swsusp_arch_resume() can fail is memory being
-     * very tight, so we have to free it as soon as we can to avoid
-     * subsequent failures
-     */
-    swsusp_free();
-    restore_processor_state();
-    touch_softlockup_watchdog();
-    device_power_up();
-    local_irq_enable();
-    return error;
-}
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 5bd321bcbb75..f5512cb3aa86 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -28,6 +28,29 @@
 
 #include "power.h"
 
+/*
+ * NOTE: The SNAPSHOT_SET_SWAP_FILE and SNAPSHOT_PMOPS ioctls are obsolete and
+ * will be removed in the future.  They are only preserved here for
+ * compatibility with existing userland utilities.
+ */
+#define SNAPSHOT_SET_SWAP_FILE    _IOW(SNAPSHOT_IOC_MAGIC, 10, unsigned int)
+#define SNAPSHOT_PMOPS            _IOW(SNAPSHOT_IOC_MAGIC, 12, unsigned int)
+
+#define PMOPS_PREPARE    1
+#define PMOPS_ENTER      2
+#define PMOPS_FINISH     3
+
+/*
+ * NOTE: The following ioctl definitions are wrong and have been replaced with
+ * correct ones.  They are only preserved here for compatibility with existing
+ * userland utilities and will be removed in the future.
+ */
+#define SNAPSHOT_ATOMIC_SNAPSHOT    _IOW(SNAPSHOT_IOC_MAGIC, 3, void *)
+#define SNAPSHOT_SET_IMAGE_SIZE     _IOW(SNAPSHOT_IOC_MAGIC, 6, unsigned long)
+#define SNAPSHOT_AVAIL_SWAP         _IOR(SNAPSHOT_IOC_MAGIC, 7, void *)
+#define SNAPSHOT_GET_SWAP_PAGE      _IOR(SNAPSHOT_IOC_MAGIC, 8, void *)
+
+
 #define SNAPSHOT_MINOR    231
 
 static struct snapshot_data {
@@ -36,7 +59,7 @@ static struct snapshot_data {
     int mode;
     char frozen;
     char ready;
-    char platform_suspend;
+    char platform_support;
 } snapshot_state;
 
 atomic_t snapshot_device_available = ATOMIC_INIT(1);
@@ -44,6 +67,7 @@ atomic_t snapshot_device_available = ATOMIC_INIT(1);
 static int snapshot_open(struct inode *inode, struct file *filp)
 {
     struct snapshot_data *data;
+    int error;
 
     if (!atomic_add_unless(&snapshot_device_available, -1, 0))
         return -EBUSY;
@@ -64,13 +88,23 @@ static int snapshot_open(struct inode *inode, struct file *filp)
         data->swap = swsusp_resume_device ?
             swap_type_of(swsusp_resume_device, 0, NULL) : -1;
         data->mode = O_RDONLY;
+        error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
+        if (error)
+            pm_notifier_call_chain(PM_POST_RESTORE);
     } else {
         data->swap = -1;
         data->mode = O_WRONLY;
+        error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE);
+        if (error)
+            pm_notifier_call_chain(PM_POST_HIBERNATION);
+    }
+    if (error) {
+        atomic_inc(&snapshot_device_available);
+        return error;
     }
     data->frozen = 0;
     data->ready = 0;
-    data->platform_suspend = 0;
+    data->platform_support = 0;
 
     return 0;
 }
@@ -88,6 +122,8 @@ static int snapshot_release(struct inode *inode, struct file *filp)
         thaw_processes();
         mutex_unlock(&pm_mutex);
     }
+    pm_notifier_call_chain(data->mode == O_WRONLY ?
+            PM_POST_HIBERNATION : PM_POST_RESTORE);
     atomic_inc(&snapshot_device_available);
     return 0;
 }
@@ -133,7 +169,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
 {
     int error = 0;
     struct snapshot_data *data;
-    loff_t avail;
+    loff_t size;
     sector_t offset;
 
     if (_IOC_TYPE(cmd) != SNAPSHOT_IOC_MAGIC)
@@ -151,18 +187,13 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
         if (data->frozen)
             break;
         mutex_lock(&pm_mutex);
-        error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE);
-        if (!error) {
-            printk("Syncing filesystems ... ");
-            sys_sync();
-            printk("done.\n");
-
-            error = freeze_processes();
-            if (error)
-                thaw_processes();
-        }
+        printk("Syncing filesystems ... ");
+        sys_sync();
+        printk("done.\n");
+
+        error = freeze_processes();
         if (error)
-            pm_notifier_call_chain(PM_POST_HIBERNATION);
+            thaw_processes();
         mutex_unlock(&pm_mutex);
         if (!error)
             data->frozen = 1;
@@ -173,19 +204,19 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
             break;
         mutex_lock(&pm_mutex);
         thaw_processes();
-        pm_notifier_call_chain(PM_POST_HIBERNATION);
         mutex_unlock(&pm_mutex);
         data->frozen = 0;
         break;
 
+    case SNAPSHOT_CREATE_IMAGE:
     case SNAPSHOT_ATOMIC_SNAPSHOT:
         if (data->mode != O_RDONLY || !data->frozen || data->ready) {
             error = -EPERM;
             break;
         }
-        error = hibernation_snapshot(data->platform_suspend);
+        error = hibernation_snapshot(data->platform_support);
         if (!error)
-            error = put_user(in_suspend, (unsigned int __user *)arg);
+            error = put_user(in_suspend, (int __user *)arg);
         if (!error)
             data->ready = 1;
         break;
@@ -197,7 +228,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
             error = -EPERM;
             break;
         }
-        error = hibernation_restore(data->platform_suspend);
+        error = hibernation_restore(data->platform_support);
         break;
 
     case SNAPSHOT_FREE:
@@ -206,16 +237,29 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
         data->ready = 0;
         break;
 
+    case SNAPSHOT_PREF_IMAGE_SIZE:
     case SNAPSHOT_SET_IMAGE_SIZE:
         image_size = arg;
         break;
 
+    case SNAPSHOT_GET_IMAGE_SIZE:
+        if (!data->ready) {
+            error = -ENODATA;
+            break;
+        }
+        size = snapshot_get_image_size();
+        size <<= PAGE_SHIFT;
+        error = put_user(size, (loff_t __user *)arg);
+        break;
+
+    case SNAPSHOT_AVAIL_SWAP_SIZE:
     case SNAPSHOT_AVAIL_SWAP:
-        avail = count_swap_pages(data->swap, 1);
-        avail <<= PAGE_SHIFT;
-        error = put_user(avail, (loff_t __user *)arg);
+        size = count_swap_pages(data->swap, 1);
+        size <<= PAGE_SHIFT;
+        error = put_user(size, (loff_t __user *)arg);
         break;
 
+    case SNAPSHOT_ALLOC_SWAP_PAGE:
     case SNAPSHOT_GET_SWAP_PAGE:
         if (data->swap < 0 || data->swap >= MAX_SWAPFILES) {
             error = -ENODEV;
@@ -224,7 +268,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
         offset = alloc_swapdev_block(data->swap);
         if (offset) {
             offset <<= PAGE_SHIFT;
-            error = put_user(offset, (sector_t __user *)arg);
+            error = put_user(offset, (loff_t __user *)arg);
         } else {
             error = -ENOSPC;
         }
@@ -238,7 +282,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
         free_all_swap_pages(data->swap);
         break;
 
-    case SNAPSHOT_SET_SWAP_FILE:
+    case SNAPSHOT_SET_SWAP_FILE: /* This ioctl is deprecated */
         if (!swsusp_swap_in_use()) {
             /*
              * User space encodes device types as two-byte values,
@@ -275,26 +319,33 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
         mutex_unlock(&pm_mutex);
         break;
 
-    case SNAPSHOT_PMOPS:
+    case SNAPSHOT_PLATFORM_SUPPORT:
+        data->platform_support = !!arg;
+        break;
+
+    case SNAPSHOT_POWER_OFF:
+        if (data->platform_support)
+            error = hibernation_platform_enter();
+        break;
+
+    case SNAPSHOT_PMOPS: /* This ioctl is deprecated */
         error = -EINVAL;
 
         switch (arg) {
 
         case PMOPS_PREPARE:
-            data->platform_suspend = 1;
+            data->platform_support = 1;
             error = 0;
             break;
 
         case PMOPS_ENTER:
-            if (data->platform_suspend)
+            if (data->platform_support)
                 error = hibernation_platform_enter();
-
             break;
 
         case PMOPS_FINISH:
-            if (data->platform_suspend)
+            if (data->platform_support)
                 error = 0;
-
             break;
 
         default:
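
Note (illustration only, not part of the patch): the renamed snapshot ioctls keep the semantics of their deprecated counterparts, so a user-space hibernation helper drives them in the usual order. A rough sketch follows, assuming the device node is /dev/snapshot and that the new ioctl numbers come from <linux/suspend_ioctls.h>; error handling and the actual image read()/write() loop are omitted.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/suspend_ioctls.h>   /* assumed to provide the SNAPSHOT_* numbers */

int main(void)
{
    int in_suspend = 0;
    long long image_size = 0;
    int fd = open("/dev/snapshot", O_RDONLY);   /* device node name is an assumption */

    if (fd < 0)
        return 1;
    ioctl(fd, SNAPSHOT_FREEZE, 0);              /* freeze user-space tasks */
    ioctl(fd, SNAPSHOT_CREATE_IMAGE, &in_suspend);
    if (in_suspend) {
        /* non-zero: an image was created and can now be read() out */
        ioctl(fd, SNAPSHOT_GET_IMAGE_SIZE, &image_size);
        printf("hibernation image: %lld bytes\n", image_size);
        /* ... read(fd, ...) the image and write it to swap here ... */
    }
    ioctl(fd, SNAPSHOT_UNFREEZE, 0);
    close(fd);
    return 0;
}
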