| author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-12 16:17:27 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-12 16:17:27 -0400 |
| commit | 947ec0b0c1e7e80eef4fe64f7763a06d0cf04d2e | |
| tree | 29547b6975d58c3b252f08dc6c2dbda3b9adfa88 /kernel | |
| parent | c53567ad4528b6efefc3fc22a354d20f6226a098 | |
| parent | 5818a6e2519b34cd6d0220d89f5729ab2725e1bf | |
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6:
PM: Add empty suspend/resume device irq functions
PM/Hibernate: Move NVS routines into a seperate file (v2).
PM/Hibernate: Rename disk.c to hibernate.c
PM: Separate suspend to RAM functionality from core
Driver Core: Rework platform suspend/resume, print warning
PM: Remove device_type suspend()/resume()
PM/Hibernate: Move memory shrinking to snapshot.c (rev. 2)
PM/Suspend: Do not shrink memory before suspend
PM: Remove bus_type suspend_late()/resume_early() V2
PM core: rename suspend and resume functions
PM: Rename device_power_down/up()
PM: Remove unused asm/suspend.h
x86: unify power/cpu_(32|64).c
x86: unify power/cpu_(32|64) copyright notes
x86: unify power/cpu_(32|64) regarding restoring processor state
x86: unify power/cpu_(32|64) regarding saving processor state
x86: unify power/cpu_(32|64) global variables
x86: unify power/cpu_(32|64) headers
PM: Warn if interrupts are enabled during suspend-resume of sysdevs
PM/ACPI/x86: Fix sparse warning in arch/x86/kernel/acpi/sleep.c
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/kexec.c | 14 |
| -rw-r--r-- | kernel/power/Kconfig | 4 |
| -rw-r--r-- | kernel/power/Makefile | 5 |
| -rw-r--r-- | kernel/power/hibernate.c (renamed from kernel/power/disk.c) | 34 |
| -rw-r--r-- | kernel/power/hibernate_nvs.c | 135 |
| -rw-r--r-- | kernel/power/main.c | 521 |
| -rw-r--r-- | kernel/power/power.h | 25 |
| -rw-r--r-- | kernel/power/snapshot.c | 80 |
| -rw-r--r-- | kernel/power/suspend.c | 300 |
| -rw-r--r-- | kernel/power/suspend_test.c | 187 |
| -rw-r--r-- | kernel/power/swsusp.c | 198 |
11 files changed, 751 insertions, 752 deletions
diff --git a/kernel/kexec.c b/kernel/kexec.c
index e4983770913b..ae1c35201cc8 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
| @@ -1448,17 +1448,17 @@ int kernel_kexec(void) | |||
| 1448 | goto Restore_console; | 1448 | goto Restore_console; |
| 1449 | } | 1449 | } |
| 1450 | suspend_console(); | 1450 | suspend_console(); |
| 1451 | error = device_suspend(PMSG_FREEZE); | 1451 | error = dpm_suspend_start(PMSG_FREEZE); |
| 1452 | if (error) | 1452 | if (error) |
| 1453 | goto Resume_console; | 1453 | goto Resume_console; |
| 1454 | /* At this point, device_suspend() has been called, | 1454 | /* At this point, dpm_suspend_start() has been called, |
| 1455 | * but *not* device_power_down(). We *must* | 1455 | * but *not* dpm_suspend_noirq(). We *must* call |
| 1456 | * device_power_down() now. Otherwise, drivers for | 1456 | * dpm_suspend_noirq() now. Otherwise, drivers for |
| 1457 | * some devices (e.g. interrupt controllers) become | 1457 | * some devices (e.g. interrupt controllers) become |
| 1458 | * desynchronized with the actual state of the | 1458 | * desynchronized with the actual state of the |
| 1459 | * hardware at resume time, and evil weirdness ensues. | 1459 | * hardware at resume time, and evil weirdness ensues. |
| 1460 | */ | 1460 | */ |
| 1461 | error = device_power_down(PMSG_FREEZE); | 1461 | error = dpm_suspend_noirq(PMSG_FREEZE); |
| 1462 | if (error) | 1462 | if (error) |
| 1463 | goto Resume_devices; | 1463 | goto Resume_devices; |
| 1464 | error = disable_nonboot_cpus(); | 1464 | error = disable_nonboot_cpus(); |
| @@ -1486,9 +1486,9 @@ int kernel_kexec(void) | |||
| 1486 | local_irq_enable(); | 1486 | local_irq_enable(); |
| 1487 | Enable_cpus: | 1487 | Enable_cpus: |
| 1488 | enable_nonboot_cpus(); | 1488 | enable_nonboot_cpus(); |
| 1489 | device_power_up(PMSG_RESTORE); | 1489 | dpm_resume_noirq(PMSG_RESTORE); |
| 1490 | Resume_devices: | 1490 | Resume_devices: |
| 1491 | device_resume(PMSG_RESTORE); | 1491 | dpm_resume_end(PMSG_RESTORE); |
| 1492 | Resume_console: | 1492 | Resume_console: |
| 1493 | resume_console(); | 1493 | resume_console(); |
| 1494 | thaw_processes(); | 1494 | thaw_processes(); |
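The hunk above is a straight rename of the PM core entry points: device_suspend() is now dpm_suspend_start(), device_power_down() is dpm_suspend_noirq(), and on the resume side device_power_up() and device_resume() become dpm_resume_noirq() and dpm_resume_end(). A minimal sketch of the ordering the in-code comment insists on, written as a hypothetical caller (example_freeze_devices() is invented for illustration; kernel PM header includes are omitted):

```c
/*
 * Hypothetical caller showing how the renamed PM core calls pair up.
 * The old names are noted in comments; the ordering and unwind labels
 * mirror the kernel_kexec() hunk above.
 */
static int example_freeze_devices(void)
{
	int error;

	error = dpm_suspend_start(PMSG_FREEZE);		/* was device_suspend() */
	if (error)
		return error;

	/*
	 * dpm_suspend_noirq() must follow, or drivers such as interrupt
	 * controllers get out of sync with the hardware at resume time.
	 */
	error = dpm_suspend_noirq(PMSG_FREEZE);		/* was device_power_down() */
	if (error)
		goto Resume_devices;

	/* ... the actual kexec jump / image handling would go here ... */

	dpm_resume_noirq(PMSG_RESTORE);			/* was device_power_up() */
 Resume_devices:
	dpm_resume_end(PMSG_RESTORE);			/* was device_resume() */
	return error;
}
```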
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 23bd4daeb96b..72067cbdb37f 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
| @@ -116,9 +116,13 @@ config SUSPEND_FREEZER | |||
| 116 | 116 | ||
| 117 | Turning OFF this setting is NOT recommended! If in doubt, say Y. | 117 | Turning OFF this setting is NOT recommended! If in doubt, say Y. |
| 118 | 118 | ||
| 119 | config HIBERNATION_NVS | ||
| 120 | bool | ||
| 121 | |||
| 119 | config HIBERNATION | 122 | config HIBERNATION |
| 120 | bool "Hibernation (aka 'suspend to disk')" | 123 | bool "Hibernation (aka 'suspend to disk')" |
| 121 | depends on PM && SWAP && ARCH_HIBERNATION_POSSIBLE | 124 | depends on PM && SWAP && ARCH_HIBERNATION_POSSIBLE |
| 125 | select HIBERNATION_NVS if HAS_IOMEM | ||
| 122 | ---help--- | 126 | ---help--- |
| 123 | Enable the suspend to disk (STD) functionality, which is usually | 127 | Enable the suspend to disk (STD) functionality, which is usually |
| 124 | called "hibernation" in user interfaces. STD checkpoints the | 128 | called "hibernation" in user interfaces. STD checkpoints the |
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index 720ea4f781bd..c3b81c30e5d5 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
| @@ -6,6 +6,9 @@ endif | |||
| 6 | obj-$(CONFIG_PM) += main.o | 6 | obj-$(CONFIG_PM) += main.o |
| 7 | obj-$(CONFIG_PM_SLEEP) += console.o | 7 | obj-$(CONFIG_PM_SLEEP) += console.o |
| 8 | obj-$(CONFIG_FREEZER) += process.o | 8 | obj-$(CONFIG_FREEZER) += process.o |
| 9 | obj-$(CONFIG_HIBERNATION) += swsusp.o disk.o snapshot.o swap.o user.o | 9 | obj-$(CONFIG_SUSPEND) += suspend.o |
| 10 | obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o | ||
| 11 | obj-$(CONFIG_HIBERNATION) += swsusp.o hibernate.o snapshot.o swap.o user.o | ||
| 12 | obj-$(CONFIG_HIBERNATION_NVS) += hibernate_nvs.o | ||
| 10 | 13 | ||
| 11 | obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o | 14 | obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o |
diff --git a/kernel/power/disk.c b/kernel/power/hibernate.c
index 5cb080e7eebd..81d2e7464893 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/hibernate.c
| @@ -1,12 +1,12 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * kernel/power/disk.c - Suspend-to-disk support. | 2 | * kernel/power/hibernate.c - Hibernation (a.k.a suspend-to-disk) support. |
| 3 | * | 3 | * |
| 4 | * Copyright (c) 2003 Patrick Mochel | 4 | * Copyright (c) 2003 Patrick Mochel |
| 5 | * Copyright (c) 2003 Open Source Development Lab | 5 | * Copyright (c) 2003 Open Source Development Lab |
| 6 | * Copyright (c) 2004 Pavel Machek <pavel@suse.cz> | 6 | * Copyright (c) 2004 Pavel Machek <pavel@suse.cz> |
| 7 | * Copyright (c) 2009 Rafael J. Wysocki, Novell Inc. | ||
| 7 | * | 8 | * |
| 8 | * This file is released under the GPLv2. | 9 | * This file is released under the GPLv2. |
| 9 | * | ||
| 10 | */ | 10 | */ |
| 11 | 11 | ||
| 12 | #include <linux/suspend.h> | 12 | #include <linux/suspend.h> |
| @@ -215,13 +215,13 @@ static int create_image(int platform_mode) | |||
| 215 | if (error) | 215 | if (error) |
| 216 | return error; | 216 | return error; |
| 217 | 217 | ||
| 218 | /* At this point, device_suspend() has been called, but *not* | 218 | /* At this point, dpm_suspend_start() has been called, but *not* |
| 219 | * device_power_down(). We *must* call device_power_down() now. | 219 | * dpm_suspend_noirq(). We *must* call dpm_suspend_noirq() now. |
| 220 | * Otherwise, drivers for some devices (e.g. interrupt controllers) | 220 | * Otherwise, drivers for some devices (e.g. interrupt controllers) |
| 221 | * become desynchronized with the actual state of the hardware | 221 | * become desynchronized with the actual state of the hardware |
| 222 | * at resume time, and evil weirdness ensues. | 222 | * at resume time, and evil weirdness ensues. |
| 223 | */ | 223 | */ |
| 224 | error = device_power_down(PMSG_FREEZE); | 224 | error = dpm_suspend_noirq(PMSG_FREEZE); |
| 225 | if (error) { | 225 | if (error) { |
| 226 | printk(KERN_ERR "PM: Some devices failed to power down, " | 226 | printk(KERN_ERR "PM: Some devices failed to power down, " |
| 227 | "aborting hibernation\n"); | 227 | "aborting hibernation\n"); |
| @@ -262,7 +262,7 @@ static int create_image(int platform_mode) | |||
| 262 | 262 | ||
| 263 | Power_up: | 263 | Power_up: |
| 264 | sysdev_resume(); | 264 | sysdev_resume(); |
| 265 | /* NOTE: device_power_up() is just a resume() for devices | 265 | /* NOTE: dpm_resume_noirq() is just a resume() for devices |
| 266 | * that suspended with irqs off ... no overall powerup. | 266 | * that suspended with irqs off ... no overall powerup. |
| 267 | */ | 267 | */ |
| 268 | 268 | ||
| @@ -275,7 +275,7 @@ static int create_image(int platform_mode) | |||
| 275 | Platform_finish: | 275 | Platform_finish: |
| 276 | platform_finish(platform_mode); | 276 | platform_finish(platform_mode); |
| 277 | 277 | ||
| 278 | device_power_up(in_suspend ? | 278 | dpm_resume_noirq(in_suspend ? |
| 279 | (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); | 279 | (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); |
| 280 | 280 | ||
| 281 | return error; | 281 | return error; |
| @@ -304,7 +304,7 @@ int hibernation_snapshot(int platform_mode) | |||
| 304 | goto Close; | 304 | goto Close; |
| 305 | 305 | ||
| 306 | suspend_console(); | 306 | suspend_console(); |
| 307 | error = device_suspend(PMSG_FREEZE); | 307 | error = dpm_suspend_start(PMSG_FREEZE); |
| 308 | if (error) | 308 | if (error) |
| 309 | goto Recover_platform; | 309 | goto Recover_platform; |
| 310 | 310 | ||
| @@ -315,7 +315,7 @@ int hibernation_snapshot(int platform_mode) | |||
| 315 | /* Control returns here after successful restore */ | 315 | /* Control returns here after successful restore */ |
| 316 | 316 | ||
| 317 | Resume_devices: | 317 | Resume_devices: |
| 318 | device_resume(in_suspend ? | 318 | dpm_resume_end(in_suspend ? |
| 319 | (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); | 319 | (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); |
| 320 | resume_console(); | 320 | resume_console(); |
| 321 | Close: | 321 | Close: |
| @@ -339,7 +339,7 @@ static int resume_target_kernel(bool platform_mode) | |||
| 339 | { | 339 | { |
| 340 | int error; | 340 | int error; |
| 341 | 341 | ||
| 342 | error = device_power_down(PMSG_QUIESCE); | 342 | error = dpm_suspend_noirq(PMSG_QUIESCE); |
| 343 | if (error) { | 343 | if (error) { |
| 344 | printk(KERN_ERR "PM: Some devices failed to power down, " | 344 | printk(KERN_ERR "PM: Some devices failed to power down, " |
| 345 | "aborting resume\n"); | 345 | "aborting resume\n"); |
| @@ -394,7 +394,7 @@ static int resume_target_kernel(bool platform_mode) | |||
| 394 | Cleanup: | 394 | Cleanup: |
| 395 | platform_restore_cleanup(platform_mode); | 395 | platform_restore_cleanup(platform_mode); |
| 396 | 396 | ||
| 397 | device_power_up(PMSG_RECOVER); | 397 | dpm_resume_noirq(PMSG_RECOVER); |
| 398 | 398 | ||
| 399 | return error; | 399 | return error; |
| 400 | } | 400 | } |
| @@ -414,10 +414,10 @@ int hibernation_restore(int platform_mode) | |||
| 414 | 414 | ||
| 415 | pm_prepare_console(); | 415 | pm_prepare_console(); |
| 416 | suspend_console(); | 416 | suspend_console(); |
| 417 | error = device_suspend(PMSG_QUIESCE); | 417 | error = dpm_suspend_start(PMSG_QUIESCE); |
| 418 | if (!error) { | 418 | if (!error) { |
| 419 | error = resume_target_kernel(platform_mode); | 419 | error = resume_target_kernel(platform_mode); |
| 420 | device_resume(PMSG_RECOVER); | 420 | dpm_resume_end(PMSG_RECOVER); |
| 421 | } | 421 | } |
| 422 | resume_console(); | 422 | resume_console(); |
| 423 | pm_restore_console(); | 423 | pm_restore_console(); |
| @@ -447,14 +447,14 @@ int hibernation_platform_enter(void) | |||
| 447 | 447 | ||
| 448 | entering_platform_hibernation = true; | 448 | entering_platform_hibernation = true; |
| 449 | suspend_console(); | 449 | suspend_console(); |
| 450 | error = device_suspend(PMSG_HIBERNATE); | 450 | error = dpm_suspend_start(PMSG_HIBERNATE); |
| 451 | if (error) { | 451 | if (error) { |
| 452 | if (hibernation_ops->recover) | 452 | if (hibernation_ops->recover) |
| 453 | hibernation_ops->recover(); | 453 | hibernation_ops->recover(); |
| 454 | goto Resume_devices; | 454 | goto Resume_devices; |
| 455 | } | 455 | } |
| 456 | 456 | ||
| 457 | error = device_power_down(PMSG_HIBERNATE); | 457 | error = dpm_suspend_noirq(PMSG_HIBERNATE); |
| 458 | if (error) | 458 | if (error) |
| 459 | goto Resume_devices; | 459 | goto Resume_devices; |
| 460 | 460 | ||
| @@ -479,11 +479,11 @@ int hibernation_platform_enter(void) | |||
| 479 | Platofrm_finish: | 479 | Platofrm_finish: |
| 480 | hibernation_ops->finish(); | 480 | hibernation_ops->finish(); |
| 481 | 481 | ||
| 482 | device_power_up(PMSG_RESTORE); | 482 | dpm_suspend_noirq(PMSG_RESTORE); |
| 483 | 483 | ||
| 484 | Resume_devices: | 484 | Resume_devices: |
| 485 | entering_platform_hibernation = false; | 485 | entering_platform_hibernation = false; |
| 486 | device_resume(PMSG_RESTORE); | 486 | dpm_resume_end(PMSG_RESTORE); |
| 487 | resume_console(); | 487 | resume_console(); |
| 488 | 488 | ||
| 489 | Close: | 489 | Close: |
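The resume-side calls in create_image() and hibernation_snapshot() above choose their pm_message_t with the same ternary, in_suspend ? (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE. A small sketch that just spells that selection out (the helper name is invented for illustration; header includes are omitted):

```c
/*
 * Invented helper that decodes the event argument passed to
 * dpm_resume_noirq()/dpm_resume_end() in hibernate.c above.
 */
static pm_message_t example_resume_event(int error)
{
	if (!in_suspend)		/* control came back from a restored image */
		return PMSG_RESTORE;
	if (error)			/* snapshot failed, let drivers recover */
		return PMSG_RECOVER;
	return PMSG_THAW;		/* snapshot made, thaw devices to write it out */
}
```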
diff --git a/kernel/power/hibernate_nvs.c b/kernel/power/hibernate_nvs.c
new file mode 100644
index 000000000000..39ac698ef836
--- /dev/null
+++ b/kernel/power/hibernate_nvs.c
| @@ -0,0 +1,135 @@ | |||
| 1 | /* | ||
| 2 | * linux/kernel/power/hibernate_nvs.c - Routines for handling NVS memory | ||
| 3 | * | ||
| 4 | * Copyright (C) 2008,2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. | ||
| 5 | * | ||
| 6 | * This file is released under the GPLv2. | ||
| 7 | */ | ||
| 8 | |||
| 9 | #include <linux/io.h> | ||
| 10 | #include <linux/kernel.h> | ||
| 11 | #include <linux/list.h> | ||
| 12 | #include <linux/mm.h> | ||
| 13 | #include <linux/suspend.h> | ||
| 14 | |||
| 15 | /* | ||
| 16 | * Platforms, like ACPI, may want us to save some memory used by them during | ||
| 17 | * hibernation and to restore the contents of this memory during the subsequent | ||
| 18 | * resume. The code below implements a mechanism allowing us to do that. | ||
| 19 | */ | ||
| 20 | |||
| 21 | struct nvs_page { | ||
| 22 | unsigned long phys_start; | ||
| 23 | unsigned int size; | ||
| 24 | void *kaddr; | ||
| 25 | void *data; | ||
| 26 | struct list_head node; | ||
| 27 | }; | ||
| 28 | |||
| 29 | static LIST_HEAD(nvs_list); | ||
| 30 | |||
| 31 | /** | ||
| 32 | * hibernate_nvs_register - register platform NVS memory region to save | ||
| 33 | * @start - physical address of the region | ||
| 34 | * @size - size of the region | ||
| 35 | * | ||
| 36 | * The NVS region need not be page-aligned (both ends) and we arrange | ||
| 37 | * things so that the data from page-aligned addresses in this region will | ||
| 38 | * be copied into separate RAM pages. | ||
| 39 | */ | ||
| 40 | int hibernate_nvs_register(unsigned long start, unsigned long size) | ||
| 41 | { | ||
| 42 | struct nvs_page *entry, *next; | ||
| 43 | |||
| 44 | while (size > 0) { | ||
| 45 | unsigned int nr_bytes; | ||
| 46 | |||
| 47 | entry = kzalloc(sizeof(struct nvs_page), GFP_KERNEL); | ||
| 48 | if (!entry) | ||
| 49 | goto Error; | ||
| 50 | |||
| 51 | list_add_tail(&entry->node, &nvs_list); | ||
| 52 | entry->phys_start = start; | ||
| 53 | nr_bytes = PAGE_SIZE - (start & ~PAGE_MASK); | ||
| 54 | entry->size = (size < nr_bytes) ? size : nr_bytes; | ||
| 55 | |||
| 56 | start += entry->size; | ||
| 57 | size -= entry->size; | ||
| 58 | } | ||
| 59 | return 0; | ||
| 60 | |||
| 61 | Error: | ||
| 62 | list_for_each_entry_safe(entry, next, &nvs_list, node) { | ||
| 63 | list_del(&entry->node); | ||
| 64 | kfree(entry); | ||
| 65 | } | ||
| 66 | return -ENOMEM; | ||
| 67 | } | ||
| 68 | |||
| 69 | /** | ||
| 70 | * hibernate_nvs_free - free data pages allocated for saving NVS regions | ||
| 71 | */ | ||
| 72 | void hibernate_nvs_free(void) | ||
| 73 | { | ||
| 74 | struct nvs_page *entry; | ||
| 75 | |||
| 76 | list_for_each_entry(entry, &nvs_list, node) | ||
| 77 | if (entry->data) { | ||
| 78 | free_page((unsigned long)entry->data); | ||
| 79 | entry->data = NULL; | ||
| 80 | if (entry->kaddr) { | ||
| 81 | iounmap(entry->kaddr); | ||
| 82 | entry->kaddr = NULL; | ||
| 83 | } | ||
| 84 | } | ||
| 85 | } | ||
| 86 | |||
| 87 | /** | ||
| 88 | * hibernate_nvs_alloc - allocate memory necessary for saving NVS regions | ||
| 89 | */ | ||
| 90 | int hibernate_nvs_alloc(void) | ||
| 91 | { | ||
| 92 | struct nvs_page *entry; | ||
| 93 | |||
| 94 | list_for_each_entry(entry, &nvs_list, node) { | ||
| 95 | entry->data = (void *)__get_free_page(GFP_KERNEL); | ||
| 96 | if (!entry->data) { | ||
| 97 | hibernate_nvs_free(); | ||
| 98 | return -ENOMEM; | ||
| 99 | } | ||
| 100 | } | ||
| 101 | return 0; | ||
| 102 | } | ||
| 103 | |||
| 104 | /** | ||
| 105 | * hibernate_nvs_save - save NVS memory regions | ||
| 106 | */ | ||
| 107 | void hibernate_nvs_save(void) | ||
| 108 | { | ||
| 109 | struct nvs_page *entry; | ||
| 110 | |||
| 111 | printk(KERN_INFO "PM: Saving platform NVS memory\n"); | ||
| 112 | |||
| 113 | list_for_each_entry(entry, &nvs_list, node) | ||
| 114 | if (entry->data) { | ||
| 115 | entry->kaddr = ioremap(entry->phys_start, entry->size); | ||
| 116 | memcpy(entry->data, entry->kaddr, entry->size); | ||
| 117 | } | ||
| 118 | } | ||
| 119 | |||
| 120 | /** | ||
| 121 | * hibernate_nvs_restore - restore NVS memory regions | ||
| 122 | * | ||
| 123 | * This function is going to be called with interrupts disabled, so it | ||
| 124 | * cannot iounmap the virtual addresses used to access the NVS region. | ||
| 125 | */ | ||
| 126 | void hibernate_nvs_restore(void) | ||
| 127 | { | ||
| 128 | struct nvs_page *entry; | ||
| 129 | |||
| 130 | printk(KERN_INFO "PM: Restoring platform NVS memory\n"); | ||
| 131 | |||
| 132 | list_for_each_entry(entry, &nvs_list, node) | ||
| 133 | if (entry->data) | ||
| 134 | memcpy(entry->kaddr, entry->data, entry->size); | ||
| 135 | } | ||
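The new file boils down to a five-call lifecycle: register the physical ranges early, allocate backing pages before the snapshot, save while ioremap() is still usable, restore (possibly with interrupts off), and free afterwards. A hedged sketch of a platform-side caller follows; the symbol names, the physical range, and the assumption that the prototypes are exported via <linux/suspend.h> are illustrative, not part of the patch:

```c
#include <linux/init.h>
#include <linux/suspend.h>	/* assumed home of the hibernate_nvs_* prototypes */

/* Made-up firmware NVS range, purely for illustration. */
#define EXAMPLE_NVS_START	0xdf6b0000UL
#define EXAMPLE_NVS_SIZE	0x10000UL

static int __init example_nvs_register(void)
{
	/* Splits the range into per-page nvs_page entries on nvs_list. */
	return hibernate_nvs_register(EXAMPLE_NVS_START, EXAMPLE_NVS_SIZE);
}

static int example_before_snapshot(void)
{
	int error = hibernate_nvs_alloc();	/* one RAM page per entry */

	if (error)
		return error;
	hibernate_nvs_save();			/* ioremap() + memcpy() each chunk */
	return 0;
}

static void example_after_restore(void)
{
	hibernate_nvs_restore();	/* copy data back; may run with irqs off */
	hibernate_nvs_free();		/* free the pages and iounmap() the mappings */
}
```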
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 868028280d13..f710e36930cc 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
| @@ -8,20 +8,9 @@ | |||
| 8 | * | 8 | * |
| 9 | */ | 9 | */ |
| 10 | 10 | ||
| 11 | #include <linux/module.h> | ||
| 12 | #include <linux/suspend.h> | ||
| 13 | #include <linux/kobject.h> | 11 | #include <linux/kobject.h> |
| 14 | #include <linux/string.h> | 12 | #include <linux/string.h> |
| 15 | #include <linux/delay.h> | ||
| 16 | #include <linux/errno.h> | ||
| 17 | #include <linux/kmod.h> | ||
| 18 | #include <linux/init.h> | ||
| 19 | #include <linux/console.h> | ||
| 20 | #include <linux/cpu.h> | ||
| 21 | #include <linux/resume-trace.h> | 13 | #include <linux/resume-trace.h> |
| 22 | #include <linux/freezer.h> | ||
| 23 | #include <linux/vmstat.h> | ||
| 24 | #include <linux/syscalls.h> | ||
| 25 | 14 | ||
| 26 | #include "power.h" | 15 | #include "power.h" |
| 27 | 16 | ||
| @@ -119,373 +108,6 @@ power_attr(pm_test); | |||
| 119 | 108 | ||
| 120 | #endif /* CONFIG_PM_SLEEP */ | 109 | #endif /* CONFIG_PM_SLEEP */ |
| 121 | 110 | ||
| 122 | #ifdef CONFIG_SUSPEND | ||
| 123 | |||
| 124 | static int suspend_test(int level) | ||
| 125 | { | ||
| 126 | #ifdef CONFIG_PM_DEBUG | ||
| 127 | if (pm_test_level == level) { | ||
| 128 | printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n"); | ||
| 129 | mdelay(5000); | ||
| 130 | return 1; | ||
| 131 | } | ||
| 132 | #endif /* !CONFIG_PM_DEBUG */ | ||
| 133 | return 0; | ||
| 134 | } | ||
| 135 | |||
| 136 | #ifdef CONFIG_PM_TEST_SUSPEND | ||
| 137 | |||
| 138 | /* | ||
| 139 | * We test the system suspend code by setting an RTC wakealarm a short | ||
| 140 | * time in the future, then suspending. Suspending the devices won't | ||
| 141 | * normally take long ... some systems only need a few milliseconds. | ||
| 142 | * | ||
| 143 | * The time it takes is system-specific though, so when we test this | ||
| 144 | * during system bootup we allow a LOT of time. | ||
| 145 | */ | ||
| 146 | #define TEST_SUSPEND_SECONDS 5 | ||
| 147 | |||
| 148 | static unsigned long suspend_test_start_time; | ||
| 149 | |||
| 150 | static void suspend_test_start(void) | ||
| 151 | { | ||
| 152 | /* FIXME Use better timebase than "jiffies", ideally a clocksource. | ||
| 153 | * What we want is a hardware counter that will work correctly even | ||
| 154 | * during the irqs-are-off stages of the suspend/resume cycle... | ||
| 155 | */ | ||
| 156 | suspend_test_start_time = jiffies; | ||
| 157 | } | ||
| 158 | |||
| 159 | static void suspend_test_finish(const char *label) | ||
| 160 | { | ||
| 161 | long nj = jiffies - suspend_test_start_time; | ||
| 162 | unsigned msec; | ||
| 163 | |||
| 164 | msec = jiffies_to_msecs(abs(nj)); | ||
| 165 | pr_info("PM: %s took %d.%03d seconds\n", label, | ||
| 166 | msec / 1000, msec % 1000); | ||
| 167 | |||
| 168 | /* Warning on suspend means the RTC alarm period needs to be | ||
| 169 | * larger -- the system was sooo slooowwww to suspend that the | ||
| 170 | * alarm (should have) fired before the system went to sleep! | ||
| 171 | * | ||
| 172 | * Warning on either suspend or resume also means the system | ||
| 173 | * has some performance issues. The stack dump of a WARN_ON | ||
| 174 | * is more likely to get the right attention than a printk... | ||
| 175 | */ | ||
| 176 | WARN(msec > (TEST_SUSPEND_SECONDS * 1000), "Component: %s\n", label); | ||
| 177 | } | ||
| 178 | |||
| 179 | #else | ||
| 180 | |||
| 181 | static void suspend_test_start(void) | ||
| 182 | { | ||
| 183 | } | ||
| 184 | |||
| 185 | static void suspend_test_finish(const char *label) | ||
| 186 | { | ||
| 187 | } | ||
| 188 | |||
| 189 | #endif | ||
| 190 | |||
| 191 | /* This is just an arbitrary number */ | ||
| 192 | #define FREE_PAGE_NUMBER (100) | ||
| 193 | |||
| 194 | static struct platform_suspend_ops *suspend_ops; | ||
| 195 | |||
| 196 | /** | ||
| 197 | * suspend_set_ops - Set the global suspend method table. | ||
| 198 | * @ops: Pointer to ops structure. | ||
| 199 | */ | ||
| 200 | |||
| 201 | void suspend_set_ops(struct platform_suspend_ops *ops) | ||
| 202 | { | ||
| 203 | mutex_lock(&pm_mutex); | ||
| 204 | suspend_ops = ops; | ||
| 205 | mutex_unlock(&pm_mutex); | ||
| 206 | } | ||
| 207 | |||
| 208 | /** | ||
| 209 | * suspend_valid_only_mem - generic memory-only valid callback | ||
| 210 | * | ||
| 211 | * Platform drivers that implement mem suspend only and only need | ||
| 212 | * to check for that in their .valid callback can use this instead | ||
| 213 | * of rolling their own .valid callback. | ||
| 214 | */ | ||
| 215 | int suspend_valid_only_mem(suspend_state_t state) | ||
| 216 | { | ||
| 217 | return state == PM_SUSPEND_MEM; | ||
| 218 | } | ||
| 219 | |||
| 220 | /** | ||
| 221 | * suspend_prepare - Do prep work before entering low-power state. | ||
| 222 | * | ||
| 223 | * This is common code that is called for each state that we're entering. | ||
| 224 | * Run suspend notifiers, allocate a console and stop all processes. | ||
| 225 | */ | ||
| 226 | static int suspend_prepare(void) | ||
| 227 | { | ||
| 228 | int error; | ||
| 229 | unsigned int free_pages; | ||
| 230 | |||
| 231 | if (!suspend_ops || !suspend_ops->enter) | ||
| 232 | return -EPERM; | ||
| 233 | |||
| 234 | pm_prepare_console(); | ||
| 235 | |||
| 236 | error = pm_notifier_call_chain(PM_SUSPEND_PREPARE); | ||
| 237 | if (error) | ||
| 238 | goto Finish; | ||
| 239 | |||
| 240 | error = usermodehelper_disable(); | ||
| 241 | if (error) | ||
| 242 | goto Finish; | ||
| 243 | |||
| 244 | if (suspend_freeze_processes()) { | ||
| 245 | error = -EAGAIN; | ||
| 246 | goto Thaw; | ||
| 247 | } | ||
| 248 | |||
| 249 | free_pages = global_page_state(NR_FREE_PAGES); | ||
| 250 | if (free_pages < FREE_PAGE_NUMBER) { | ||
| 251 | pr_debug("PM: free some memory\n"); | ||
| 252 | shrink_all_memory(FREE_PAGE_NUMBER - free_pages); | ||
| 253 | if (nr_free_pages() < FREE_PAGE_NUMBER) { | ||
| 254 | error = -ENOMEM; | ||
| 255 | printk(KERN_ERR "PM: No enough memory\n"); | ||
| 256 | } | ||
| 257 | } | ||
| 258 | if (!error) | ||
| 259 | return 0; | ||
| 260 | |||
| 261 | Thaw: | ||
| 262 | suspend_thaw_processes(); | ||
| 263 | usermodehelper_enable(); | ||
| 264 | Finish: | ||
| 265 | pm_notifier_call_chain(PM_POST_SUSPEND); | ||
| 266 | pm_restore_console(); | ||
| 267 | return error; | ||
| 268 | } | ||
| 269 | |||
| 270 | /* default implementation */ | ||
| 271 | void __attribute__ ((weak)) arch_suspend_disable_irqs(void) | ||
| 272 | { | ||
| 273 | local_irq_disable(); | ||
| 274 | } | ||
| 275 | |||
| 276 | /* default implementation */ | ||
| 277 | void __attribute__ ((weak)) arch_suspend_enable_irqs(void) | ||
| 278 | { | ||
| 279 | local_irq_enable(); | ||
| 280 | } | ||
| 281 | |||
| 282 | /** | ||
| 283 | * suspend_enter - enter the desired system sleep state. | ||
| 284 | * @state: state to enter | ||
| 285 | * | ||
| 286 | * This function should be called after devices have been suspended. | ||
| 287 | */ | ||
| 288 | static int suspend_enter(suspend_state_t state) | ||
| 289 | { | ||
| 290 | int error; | ||
| 291 | |||
| 292 | if (suspend_ops->prepare) { | ||
| 293 | error = suspend_ops->prepare(); | ||
| 294 | if (error) | ||
| 295 | return error; | ||
| 296 | } | ||
| 297 | |||
| 298 | error = device_power_down(PMSG_SUSPEND); | ||
| 299 | if (error) { | ||
| 300 | printk(KERN_ERR "PM: Some devices failed to power down\n"); | ||
| 301 | goto Platfrom_finish; | ||
| 302 | } | ||
| 303 | |||
| 304 | if (suspend_ops->prepare_late) { | ||
| 305 | error = suspend_ops->prepare_late(); | ||
| 306 | if (error) | ||
| 307 | goto Power_up_devices; | ||
| 308 | } | ||
| 309 | |||
| 310 | if (suspend_test(TEST_PLATFORM)) | ||
| 311 | goto Platform_wake; | ||
| 312 | |||
| 313 | error = disable_nonboot_cpus(); | ||
| 314 | if (error || suspend_test(TEST_CPUS)) | ||
| 315 | goto Enable_cpus; | ||
| 316 | |||
| 317 | arch_suspend_disable_irqs(); | ||
| 318 | BUG_ON(!irqs_disabled()); | ||
| 319 | |||
| 320 | error = sysdev_suspend(PMSG_SUSPEND); | ||
| 321 | if (!error) { | ||
| 322 | if (!suspend_test(TEST_CORE)) | ||
| 323 | error = suspend_ops->enter(state); | ||
| 324 | sysdev_resume(); | ||
| 325 | } | ||
| 326 | |||
| 327 | arch_suspend_enable_irqs(); | ||
| 328 | BUG_ON(irqs_disabled()); | ||
| 329 | |||
| 330 | Enable_cpus: | ||
| 331 | enable_nonboot_cpus(); | ||
| 332 | |||
| 333 | Platform_wake: | ||
| 334 | if (suspend_ops->wake) | ||
| 335 | suspend_ops->wake(); | ||
| 336 | |||
| 337 | Power_up_devices: | ||
| 338 | device_power_up(PMSG_RESUME); | ||
| 339 | |||
| 340 | Platfrom_finish: | ||
| 341 | if (suspend_ops->finish) | ||
| 342 | suspend_ops->finish(); | ||
| 343 | |||
| 344 | return error; | ||
| 345 | } | ||
| 346 | |||
| 347 | /** | ||
| 348 | * suspend_devices_and_enter - suspend devices and enter the desired system | ||
| 349 | * sleep state. | ||
| 350 | * @state: state to enter | ||
| 351 | */ | ||
| 352 | int suspend_devices_and_enter(suspend_state_t state) | ||
| 353 | { | ||
| 354 | int error; | ||
| 355 | |||
| 356 | if (!suspend_ops) | ||
| 357 | return -ENOSYS; | ||
| 358 | |||
| 359 | if (suspend_ops->begin) { | ||
| 360 | error = suspend_ops->begin(state); | ||
| 361 | if (error) | ||
| 362 | goto Close; | ||
| 363 | } | ||
| 364 | suspend_console(); | ||
| 365 | suspend_test_start(); | ||
| 366 | error = device_suspend(PMSG_SUSPEND); | ||
| 367 | if (error) { | ||
| 368 | printk(KERN_ERR "PM: Some devices failed to suspend\n"); | ||
| 369 | goto Recover_platform; | ||
| 370 | } | ||
| 371 | suspend_test_finish("suspend devices"); | ||
| 372 | if (suspend_test(TEST_DEVICES)) | ||
| 373 | goto Recover_platform; | ||
| 374 | |||
| 375 | suspend_enter(state); | ||
| 376 | |||
| 377 | Resume_devices: | ||
| 378 | suspend_test_start(); | ||
| 379 | device_resume(PMSG_RESUME); | ||
| 380 | suspend_test_finish("resume devices"); | ||
| 381 | resume_console(); | ||
| 382 | Close: | ||
| 383 | if (suspend_ops->end) | ||
| 384 | suspend_ops->end(); | ||
| 385 | return error; | ||
| 386 | |||
| 387 | Recover_platform: | ||
| 388 | if (suspend_ops->recover) | ||
| 389 | suspend_ops->recover(); | ||
| 390 | goto Resume_devices; | ||
| 391 | } | ||
| 392 | |||
| 393 | /** | ||
| 394 | * suspend_finish - Do final work before exiting suspend sequence. | ||
| 395 | * | ||
| 396 | * Call platform code to clean up, restart processes, and free the | ||
| 397 | * console that we've allocated. This is not called for suspend-to-disk. | ||
| 398 | */ | ||
| 399 | static void suspend_finish(void) | ||
| 400 | { | ||
| 401 | suspend_thaw_processes(); | ||
| 402 | usermodehelper_enable(); | ||
| 403 | pm_notifier_call_chain(PM_POST_SUSPEND); | ||
| 404 | pm_restore_console(); | ||
| 405 | } | ||
| 406 | |||
| 407 | |||
| 408 | |||
| 409 | |||
| 410 | static const char * const pm_states[PM_SUSPEND_MAX] = { | ||
| 411 | [PM_SUSPEND_STANDBY] = "standby", | ||
| 412 | [PM_SUSPEND_MEM] = "mem", | ||
| 413 | }; | ||
| 414 | |||
| 415 | static inline int valid_state(suspend_state_t state) | ||
| 416 | { | ||
| 417 | /* All states need lowlevel support and need to be valid | ||
| 418 | * to the lowlevel implementation, no valid callback | ||
| 419 | * implies that none are valid. */ | ||
| 420 | if (!suspend_ops || !suspend_ops->valid || !suspend_ops->valid(state)) | ||
| 421 | return 0; | ||
| 422 | return 1; | ||
| 423 | } | ||
| 424 | |||
| 425 | |||
| 426 | /** | ||
| 427 | * enter_state - Do common work of entering low-power state. | ||
| 428 | * @state: pm_state structure for state we're entering. | ||
| 429 | * | ||
| 430 | * Make sure we're the only ones trying to enter a sleep state. Fail | ||
| 431 | * if someone has beat us to it, since we don't want anything weird to | ||
| 432 | * happen when we wake up. | ||
| 433 | * Then, do the setup for suspend, enter the state, and cleaup (after | ||
| 434 | * we've woken up). | ||
| 435 | */ | ||
| 436 | static int enter_state(suspend_state_t state) | ||
| 437 | { | ||
| 438 | int error; | ||
| 439 | |||
| 440 | if (!valid_state(state)) | ||
| 441 | return -ENODEV; | ||
| 442 | |||
| 443 | if (!mutex_trylock(&pm_mutex)) | ||
| 444 | return -EBUSY; | ||
| 445 | |||
| 446 | printk(KERN_INFO "PM: Syncing filesystems ... "); | ||
| 447 | sys_sync(); | ||
| 448 | printk("done.\n"); | ||
| 449 | |||
| 450 | pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]); | ||
| 451 | error = suspend_prepare(); | ||
| 452 | if (error) | ||
| 453 | goto Unlock; | ||
| 454 | |||
| 455 | if (suspend_test(TEST_FREEZER)) | ||
| 456 | goto Finish; | ||
| 457 | |||
| 458 | pr_debug("PM: Entering %s sleep\n", pm_states[state]); | ||
| 459 | error = suspend_devices_and_enter(state); | ||
| 460 | |||
| 461 | Finish: | ||
| 462 | pr_debug("PM: Finishing wakeup.\n"); | ||
| 463 | suspend_finish(); | ||
| 464 | Unlock: | ||
| 465 | mutex_unlock(&pm_mutex); | ||
| 466 | return error; | ||
| 467 | } | ||
| 468 | |||
| 469 | |||
| 470 | /** | ||
| 471 | * pm_suspend - Externally visible function for suspending system. | ||
| 472 | * @state: Enumerated value of state to enter. | ||
| 473 | * | ||
| 474 | * Determine whether or not value is within range, get state | ||
| 475 | * structure, and enter (above). | ||
| 476 | */ | ||
| 477 | |||
| 478 | int pm_suspend(suspend_state_t state) | ||
| 479 | { | ||
| 480 | if (state > PM_SUSPEND_ON && state <= PM_SUSPEND_MAX) | ||
| 481 | return enter_state(state); | ||
| 482 | return -EINVAL; | ||
| 483 | } | ||
| 484 | |||
| 485 | EXPORT_SYMBOL(pm_suspend); | ||
| 486 | |||
| 487 | #endif /* CONFIG_SUSPEND */ | ||
| 488 | |||
| 489 | struct kobject *power_kobj; | 111 | struct kobject *power_kobj; |
| 490 | 112 | ||
| 491 | /** | 113 | /** |
| @@ -498,7 +120,6 @@ struct kobject *power_kobj; | |||
| 498 | * store() accepts one of those strings, translates it into the | 120 | * store() accepts one of those strings, translates it into the |
| 499 | * proper enumerated value, and initiates a suspend transition. | 121 | * proper enumerated value, and initiates a suspend transition. |
| 500 | */ | 122 | */ |
| 501 | |||
| 502 | static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr, | 123 | static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr, |
| 503 | char *buf) | 124 | char *buf) |
| 504 | { | 125 | { |
| @@ -596,7 +217,6 @@ static struct attribute_group attr_group = { | |||
| 596 | .attrs = g, | 217 | .attrs = g, |
| 597 | }; | 218 | }; |
| 598 | 219 | ||
| 599 | |||
| 600 | static int __init pm_init(void) | 220 | static int __init pm_init(void) |
| 601 | { | 221 | { |
| 602 | power_kobj = kobject_create_and_add("power", NULL); | 222 | power_kobj = kobject_create_and_add("power", NULL); |
| @@ -606,144 +226,3 @@ static int __init pm_init(void) | |||
| 606 | } | 226 | } |
| 607 | 227 | ||
| 608 | core_initcall(pm_init); | 228 | core_initcall(pm_init); |
| 609 | |||
| 610 | |||
| 611 | #ifdef CONFIG_PM_TEST_SUSPEND | ||
| 612 | |||
| 613 | #include <linux/rtc.h> | ||
| 614 | |||
| 615 | /* | ||
| 616 | * To test system suspend, we need a hands-off mechanism to resume the | ||
| 617 | * system. RTCs wake alarms are a common self-contained mechanism. | ||
| 618 | */ | ||
| 619 | |||
| 620 | static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state) | ||
| 621 | { | ||
| 622 | static char err_readtime[] __initdata = | ||
| 623 | KERN_ERR "PM: can't read %s time, err %d\n"; | ||
| 624 | static char err_wakealarm [] __initdata = | ||
| 625 | KERN_ERR "PM: can't set %s wakealarm, err %d\n"; | ||
| 626 | static char err_suspend[] __initdata = | ||
| 627 | KERN_ERR "PM: suspend test failed, error %d\n"; | ||
| 628 | static char info_test[] __initdata = | ||
| 629 | KERN_INFO "PM: test RTC wakeup from '%s' suspend\n"; | ||
| 630 | |||
| 631 | unsigned long now; | ||
| 632 | struct rtc_wkalrm alm; | ||
| 633 | int status; | ||
| 634 | |||
| 635 | /* this may fail if the RTC hasn't been initialized */ | ||
| 636 | status = rtc_read_time(rtc, &alm.time); | ||
| 637 | if (status < 0) { | ||
| 638 | printk(err_readtime, dev_name(&rtc->dev), status); | ||
| 639 | return; | ||
| 640 | } | ||
| 641 | rtc_tm_to_time(&alm.time, &now); | ||
| 642 | |||
| 643 | memset(&alm, 0, sizeof alm); | ||
| 644 | rtc_time_to_tm(now + TEST_SUSPEND_SECONDS, &alm.time); | ||
| 645 | alm.enabled = true; | ||
| 646 | |||
| 647 | status = rtc_set_alarm(rtc, &alm); | ||
| 648 | if (status < 0) { | ||
| 649 | printk(err_wakealarm, dev_name(&rtc->dev), status); | ||
| 650 | return; | ||
| 651 | } | ||
| 652 | |||
| 653 | if (state == PM_SUSPEND_MEM) { | ||
| 654 | printk(info_test, pm_states[state]); | ||
| 655 | status = pm_suspend(state); | ||
| 656 | if (status == -ENODEV) | ||
| 657 | state = PM_SUSPEND_STANDBY; | ||
| 658 | } | ||
| 659 | if (state == PM_SUSPEND_STANDBY) { | ||
| 660 | printk(info_test, pm_states[state]); | ||
| 661 | status = pm_suspend(state); | ||
| 662 | } | ||
| 663 | if (status < 0) | ||
| 664 | printk(err_suspend, status); | ||
| 665 | |||
| 666 | /* Some platforms can't detect that the alarm triggered the | ||
| 667 | * wakeup, or (accordingly) disable it after it afterwards. | ||
| 668 | * It's supposed to give oneshot behavior; cope. | ||
| 669 | */ | ||
| 670 | alm.enabled = false; | ||
| 671 | rtc_set_alarm(rtc, &alm); | ||
| 672 | } | ||
| 673 | |||
| 674 | static int __init has_wakealarm(struct device *dev, void *name_ptr) | ||
| 675 | { | ||
| 676 | struct rtc_device *candidate = to_rtc_device(dev); | ||
| 677 | |||
| 678 | if (!candidate->ops->set_alarm) | ||
| 679 | return 0; | ||
| 680 | if (!device_may_wakeup(candidate->dev.parent)) | ||
| 681 | return 0; | ||
| 682 | |||
| 683 | *(const char **)name_ptr = dev_name(dev); | ||
| 684 | return 1; | ||
| 685 | } | ||
| 686 | |||
| 687 | /* | ||
| 688 | * Kernel options like "test_suspend=mem" force suspend/resume sanity tests | ||
| 689 | * at startup time. They're normally disabled, for faster boot and because | ||
| 690 | * we can't know which states really work on this particular system. | ||
| 691 | */ | ||
| 692 | static suspend_state_t test_state __initdata = PM_SUSPEND_ON; | ||
| 693 | |||
| 694 | static char warn_bad_state[] __initdata = | ||
| 695 | KERN_WARNING "PM: can't test '%s' suspend state\n"; | ||
| 696 | |||
| 697 | static int __init setup_test_suspend(char *value) | ||
| 698 | { | ||
| 699 | unsigned i; | ||
| 700 | |||
| 701 | /* "=mem" ==> "mem" */ | ||
| 702 | value++; | ||
| 703 | for (i = 0; i < PM_SUSPEND_MAX; i++) { | ||
| 704 | if (!pm_states[i]) | ||
| 705 | continue; | ||
| 706 | if (strcmp(pm_states[i], value) != 0) | ||
| 707 | continue; | ||
| 708 | test_state = (__force suspend_state_t) i; | ||
| 709 | return 0; | ||
| 710 | } | ||
| 711 | printk(warn_bad_state, value); | ||
| 712 | return 0; | ||
| 713 | } | ||
| 714 | __setup("test_suspend", setup_test_suspend); | ||
| 715 | |||
| 716 | static int __init test_suspend(void) | ||
| 717 | { | ||
| 718 | static char warn_no_rtc[] __initdata = | ||
| 719 | KERN_WARNING "PM: no wakealarm-capable RTC driver is ready\n"; | ||
| 720 | |||
| 721 | char *pony = NULL; | ||
| 722 | struct rtc_device *rtc = NULL; | ||
| 723 | |||
| 724 | /* PM is initialized by now; is that state testable? */ | ||
| 725 | if (test_state == PM_SUSPEND_ON) | ||
| 726 | goto done; | ||
| 727 | if (!valid_state(test_state)) { | ||
| 728 | printk(warn_bad_state, pm_states[test_state]); | ||
| 729 | goto done; | ||
| 730 | } | ||
| 731 | |||
| 732 | /* RTCs have initialized by now too ... can we use one? */ | ||
| 733 | class_find_device(rtc_class, NULL, &pony, has_wakealarm); | ||
| 734 | if (pony) | ||
| 735 | rtc = rtc_class_open(pony); | ||
| 736 | if (!rtc) { | ||
| 737 | printk(warn_no_rtc); | ||
| 738 | goto done; | ||
| 739 | } | ||
| 740 | |||
| 741 | /* go for it */ | ||
| 742 | test_wakealarm(rtc, test_state); | ||
| 743 | rtc_class_close(rtc); | ||
| 744 | done: | ||
| 745 | return 0; | ||
| 746 | } | ||
| 747 | late_initcall(test_suspend); | ||
| 748 | |||
| 749 | #endif /* CONFIG_PM_TEST_SUSPEND */ | ||
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 46b5ec7a3afb..26d5a26f82e3 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
| @@ -45,7 +45,7 @@ static inline char *check_image_kernel(struct swsusp_info *info) | |||
| 45 | */ | 45 | */ |
| 46 | #define SPARE_PAGES ((1024 * 1024) >> PAGE_SHIFT) | 46 | #define SPARE_PAGES ((1024 * 1024) >> PAGE_SHIFT) |
| 47 | 47 | ||
| 48 | /* kernel/power/disk.c */ | 48 | /* kernel/power/hibernate.c */ |
| 49 | extern int hibernation_snapshot(int platform_mode); | 49 | extern int hibernation_snapshot(int platform_mode); |
| 50 | extern int hibernation_restore(int platform_mode); | 50 | extern int hibernation_restore(int platform_mode); |
| 51 | extern int hibernation_platform_enter(void); | 51 | extern int hibernation_platform_enter(void); |
| @@ -74,7 +74,7 @@ extern asmlinkage int swsusp_arch_resume(void); | |||
| 74 | 74 | ||
| 75 | extern int create_basic_memory_bitmaps(void); | 75 | extern int create_basic_memory_bitmaps(void); |
| 76 | extern void free_basic_memory_bitmaps(void); | 76 | extern void free_basic_memory_bitmaps(void); |
| 77 | extern unsigned int count_data_pages(void); | 77 | extern int swsusp_shrink_memory(void); |
| 78 | 78 | ||
| 79 | /** | 79 | /** |
| 80 | * Auxiliary structure used for reading the snapshot image data and | 80 | * Auxiliary structure used for reading the snapshot image data and |
| @@ -147,9 +147,8 @@ extern int swsusp_swap_in_use(void); | |||
| 147 | */ | 147 | */ |
| 148 | #define SF_PLATFORM_MODE 1 | 148 | #define SF_PLATFORM_MODE 1 |
| 149 | 149 | ||
| 150 | /* kernel/power/disk.c */ | 150 | /* kernel/power/hibernate.c */ |
| 151 | extern int swsusp_check(void); | 151 | extern int swsusp_check(void); |
| 152 | extern int swsusp_shrink_memory(void); | ||
| 153 | extern void swsusp_free(void); | 152 | extern void swsusp_free(void); |
| 154 | extern int swsusp_read(unsigned int *flags_p); | 153 | extern int swsusp_read(unsigned int *flags_p); |
| 155 | extern int swsusp_write(unsigned int flags); | 154 | extern int swsusp_write(unsigned int flags); |
| @@ -161,22 +160,36 @@ extern void swsusp_show_speed(struct timeval *, struct timeval *, | |||
| 161 | unsigned int, char *); | 160 | unsigned int, char *); |
| 162 | 161 | ||
| 163 | #ifdef CONFIG_SUSPEND | 162 | #ifdef CONFIG_SUSPEND |
| 164 | /* kernel/power/main.c */ | 163 | /* kernel/power/suspend.c */ |
| 164 | extern const char *const pm_states[]; | ||
| 165 | |||
| 166 | extern bool valid_state(suspend_state_t state); | ||
| 165 | extern int suspend_devices_and_enter(suspend_state_t state); | 167 | extern int suspend_devices_and_enter(suspend_state_t state); |
| 168 | extern int enter_state(suspend_state_t state); | ||
| 166 | #else /* !CONFIG_SUSPEND */ | 169 | #else /* !CONFIG_SUSPEND */ |
| 167 | static inline int suspend_devices_and_enter(suspend_state_t state) | 170 | static inline int suspend_devices_and_enter(suspend_state_t state) |
| 168 | { | 171 | { |
| 169 | return -ENOSYS; | 172 | return -ENOSYS; |
| 170 | } | 173 | } |
| 174 | static inline int enter_state(suspend_state_t state) { return -ENOSYS; } | ||
| 175 | static inline bool valid_state(suspend_state_t state) { return false; } | ||
| 171 | #endif /* !CONFIG_SUSPEND */ | 176 | #endif /* !CONFIG_SUSPEND */ |
| 172 | 177 | ||
| 178 | #ifdef CONFIG_PM_TEST_SUSPEND | ||
| 179 | /* kernel/power/suspend_test.c */ | ||
| 180 | extern void suspend_test_start(void); | ||
| 181 | extern void suspend_test_finish(const char *label); | ||
| 182 | #else /* !CONFIG_PM_TEST_SUSPEND */ | ||
| 183 | static inline void suspend_test_start(void) {} | ||
| 184 | static inline void suspend_test_finish(const char *label) {} | ||
| 185 | #endif /* !CONFIG_PM_TEST_SUSPEND */ | ||
| 186 | |||
| 173 | #ifdef CONFIG_PM_SLEEP | 187 | #ifdef CONFIG_PM_SLEEP |
| 174 | /* kernel/power/main.c */ | 188 | /* kernel/power/main.c */ |
| 175 | extern int pm_notifier_call_chain(unsigned long val); | 189 | extern int pm_notifier_call_chain(unsigned long val); |
| 176 | #endif | 190 | #endif |
| 177 | 191 | ||
| 178 | #ifdef CONFIG_HIGHMEM | 192 | #ifdef CONFIG_HIGHMEM |
| 179 | unsigned int count_highmem_pages(void); | ||
| 180 | int restore_highmem(void); | 193 | int restore_highmem(void); |
| 181 | #else | 194 | #else |
| 182 | static inline unsigned int count_highmem_pages(void) { return 0; } | 195 | static inline unsigned int count_highmem_pages(void) { return 0; } |
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 33e2e4a819f9..523a451b45d3 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
| @@ -39,6 +39,14 @@ static int swsusp_page_is_free(struct page *); | |||
| 39 | static void swsusp_set_page_forbidden(struct page *); | 39 | static void swsusp_set_page_forbidden(struct page *); |
| 40 | static void swsusp_unset_page_forbidden(struct page *); | 40 | static void swsusp_unset_page_forbidden(struct page *); |
| 41 | 41 | ||
| 42 | /* | ||
| 43 | * Preferred image size in bytes (tunable via /sys/power/image_size). | ||
| 44 | * When it is set to N, swsusp will do its best to ensure the image | ||
| 45 | * size will not exceed N bytes, but if that is impossible, it will | ||
| 46 | * try to create the smallest image possible. | ||
| 47 | */ | ||
| 48 | unsigned long image_size = 500 * 1024 * 1024; | ||
| 49 | |||
| 42 | /* List of PBEs needed for restoring the pages that were allocated before | 50 | /* List of PBEs needed for restoring the pages that were allocated before |
| 43 | * the suspend and included in the suspend image, but have also been | 51 | * the suspend and included in the suspend image, but have also been |
| 44 | * allocated by the "resume" kernel, so their contents cannot be written | 52 | * allocated by the "resume" kernel, so their contents cannot be written |
| @@ -840,7 +848,7 @@ static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn) | |||
| 840 | * pages. | 848 | * pages. |
| 841 | */ | 849 | */ |
| 842 | 850 | ||
| 843 | unsigned int count_highmem_pages(void) | 851 | static unsigned int count_highmem_pages(void) |
| 844 | { | 852 | { |
| 845 | struct zone *zone; | 853 | struct zone *zone; |
| 846 | unsigned int n = 0; | 854 | unsigned int n = 0; |
| @@ -902,7 +910,7 @@ static struct page *saveable_page(struct zone *zone, unsigned long pfn) | |||
| 902 | * pages. | 910 | * pages. |
| 903 | */ | 911 | */ |
| 904 | 912 | ||
| 905 | unsigned int count_data_pages(void) | 913 | static unsigned int count_data_pages(void) |
| 906 | { | 914 | { |
| 907 | struct zone *zone; | 915 | struct zone *zone; |
| 908 | unsigned long pfn, max_zone_pfn; | 916 | unsigned long pfn, max_zone_pfn; |
| @@ -1058,6 +1066,74 @@ void swsusp_free(void) | |||
| 1058 | buffer = NULL; | 1066 | buffer = NULL; |
| 1059 | } | 1067 | } |
| 1060 | 1068 | ||
| 1069 | /** | ||
| 1070 | * swsusp_shrink_memory - Try to free as much memory as needed | ||
| 1071 | * | ||
| 1072 | * ... but do not OOM-kill anyone | ||
| 1073 | * | ||
| 1074 | * Notice: all userland should be stopped before it is called, or | ||
| 1075 | * livelock is possible. | ||
| 1076 | */ | ||
| 1077 | |||
| 1078 | #define SHRINK_BITE 10000 | ||
| 1079 | static inline unsigned long __shrink_memory(long tmp) | ||
| 1080 | { | ||
| 1081 | if (tmp > SHRINK_BITE) | ||
| 1082 | tmp = SHRINK_BITE; | ||
| 1083 | return shrink_all_memory(tmp); | ||
| 1084 | } | ||
| 1085 | |||
| 1086 | int swsusp_shrink_memory(void) | ||
| 1087 | { | ||
| 1088 | long tmp; | ||
| 1089 | struct zone *zone; | ||
| 1090 | unsigned long pages = 0; | ||
| 1091 | unsigned int i = 0; | ||
| 1092 | char *p = "-\\|/"; | ||
| 1093 | struct timeval start, stop; | ||
| 1094 | |||
| 1095 | printk(KERN_INFO "PM: Shrinking memory... "); | ||
| 1096 | do_gettimeofday(&start); | ||
| 1097 | do { | ||
| 1098 | long size, highmem_size; | ||
| 1099 | |||
| 1100 | highmem_size = count_highmem_pages(); | ||
| 1101 | size = count_data_pages() + PAGES_FOR_IO + SPARE_PAGES; | ||
| 1102 | tmp = size; | ||
| 1103 | size += highmem_size; | ||
| 1104 | for_each_populated_zone(zone) { | ||
| 1105 | tmp += snapshot_additional_pages(zone); | ||
| 1106 | if (is_highmem(zone)) { | ||
| 1107 | highmem_size -= | ||
| 1108 | zone_page_state(zone, NR_FREE_PAGES); | ||
| 1109 | } else { | ||
| 1110 | tmp -= zone_page_state(zone, NR_FREE_PAGES); | ||
| 1111 | tmp += zone->lowmem_reserve[ZONE_NORMAL]; | ||
| 1112 | } | ||
| 1113 | } | ||
| 1114 | |||
| 1115 | if (highmem_size < 0) | ||
| 1116 | highmem_size = 0; | ||
| 1117 | |||
| 1118 | tmp += highmem_size; | ||
| 1119 | if (tmp > 0) { | ||
| 1120 | tmp = __shrink_memory(tmp); | ||
| 1121 | if (!tmp) | ||
| 1122 | return -ENOMEM; | ||
| 1123 | pages += tmp; | ||
| 1124 | } else if (size > image_size / PAGE_SIZE) { | ||
| 1125 | tmp = __shrink_memory(size - (image_size / PAGE_SIZE)); | ||
| 1126 | pages += tmp; | ||
| 1127 | } | ||
| 1128 | printk("\b%c", p[i++%4]); | ||
| 1129 | } while (tmp > 0); | ||
| 1130 | do_gettimeofday(&stop); | ||
| 1131 | printk("\bdone (%lu pages freed)\n", pages); | ||
| 1132 | swsusp_show_speed(&start, &stop, pages, "Freed"); | ||
| 1133 | |||
| 1134 | return 0; | ||
| 1135 | } | ||
| 1136 | |||
| 1061 | #ifdef CONFIG_HIGHMEM | 1137 | #ifdef CONFIG_HIGHMEM |
| 1062 | /** | 1138 | /** |
| 1063 | * count_pages_for_highmem - compute the number of non-highmem pages | 1139 | * count_pages_for_highmem - compute the number of non-highmem pages |
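swsusp_shrink_memory(), now moved into snapshot.c, loops until the estimated image fits under image_size (default 500 MB, tunable via /sys/power/image_size per the comment above). A quick worked example of the page arithmetic, assuming 4 KiB pages (the variable names below are illustrative only):

```c
/* Worked numbers for the threshold used above, assuming PAGE_SIZE == 4096. */
unsigned long example_image_size   = 500 * 1024 * 1024;		/* 524288000 bytes */
unsigned long example_target_pages = 500 * 1024 * 1024 / 4096;	/* 128000 pages */
/*
 * When the estimated image (data pages + highmem + PAGES_FOR_IO +
 * SPARE_PAGES) exceeds that target, __shrink_memory() is asked to free
 * the difference, at most SHRINK_BITE (10000) pages per call.
 */
```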
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
new file mode 100644
index 000000000000..6f10dfc2d3e9
--- /dev/null
+++ b/kernel/power/suspend.c
| @@ -0,0 +1,300 @@ | |||
| 1 | /* | ||
| 2 | * kernel/power/suspend.c - Suspend to RAM and standby functionality. | ||
| 3 | * | ||
| 4 | * Copyright (c) 2003 Patrick Mochel | ||
| 5 | * Copyright (c) 2003 Open Source Development Lab | ||
| 6 | * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. | ||
| 7 | * | ||
| 8 | * This file is released under the GPLv2. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/string.h> | ||
| 12 | #include <linux/delay.h> | ||
| 13 | #include <linux/errno.h> | ||
| 14 | #include <linux/init.h> | ||
| 15 | #include <linux/console.h> | ||
| 16 | #include <linux/cpu.h> | ||
| 17 | #include <linux/syscalls.h> | ||
| 18 | |||
| 19 | #include "power.h" | ||
| 20 | |||
| 21 | const char *const pm_states[PM_SUSPEND_MAX] = { | ||
| 22 | [PM_SUSPEND_STANDBY] = "standby", | ||
| 23 | [PM_SUSPEND_MEM] = "mem", | ||
| 24 | }; | ||
| 25 | |||
| 26 | static struct platform_suspend_ops *suspend_ops; | ||
| 27 | |||
| 28 | /** | ||
| 29 | * suspend_set_ops - Set the global suspend method table. | ||
| 30 | * @ops: Pointer to ops structure. | ||
| 31 | */ | ||
| 32 | void suspend_set_ops(struct platform_suspend_ops *ops) | ||
| 33 | { | ||
| 34 | mutex_lock(&pm_mutex); | ||
| 35 | suspend_ops = ops; | ||
| 36 | mutex_unlock(&pm_mutex); | ||
| 37 | } | ||
| 38 | |||
| 39 | bool valid_state(suspend_state_t state) | ||
| 40 | { | ||
| 41 | /* | ||
| 42 | * All states need lowlevel support and need to be valid to the lowlevel | ||
| 43 | * implementation, no valid callback implies that none are valid. | ||
| 44 | */ | ||
| 45 | return suspend_ops && suspend_ops->valid && suspend_ops->valid(state); | ||
| 46 | } | ||
| 47 | |||
| 48 | /** | ||
| 49 | * suspend_valid_only_mem - generic memory-only valid callback | ||
| 50 | * | ||
| 51 | * Platform drivers that implement mem suspend only and only need | ||
| 52 | * to check for that in their .valid callback can use this instead | ||
| 53 | * of rolling their own .valid callback. | ||
| 54 | */ | ||
| 55 | int suspend_valid_only_mem(suspend_state_t state) | ||
| 56 | { | ||
| 57 | return state == PM_SUSPEND_MEM; | ||
| 58 | } | ||
| 59 | |||
| 60 | static int suspend_test(int level) | ||
| 61 | { | ||
| 62 | #ifdef CONFIG_PM_DEBUG | ||
| 63 | if (pm_test_level == level) { | ||
| 64 | printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n"); | ||
| 65 | mdelay(5000); | ||
| 66 | return 1; | ||
| 67 | } | ||
| 68 | #endif /* !CONFIG_PM_DEBUG */ | ||
| 69 | return 0; | ||
| 70 | } | ||
| 71 | |||
| 72 | /** | ||
| 73 | * suspend_prepare - Do prep work before entering low-power state. | ||
| 74 | * | ||
| 75 | * This is common code that is called for each state that we're entering. | ||
| 76 | * Run suspend notifiers, allocate a console and stop all processes. | ||
| 77 | */ | ||
| 78 | static int suspend_prepare(void) | ||
| 79 | { | ||
| 80 | int error; | ||
| 81 | |||
| 82 | if (!suspend_ops || !suspend_ops->enter) | ||
| 83 | return -EPERM; | ||
| 84 | |||
| 85 | pm_prepare_console(); | ||
| 86 | |||
| 87 | error = pm_notifier_call_chain(PM_SUSPEND_PREPARE); | ||
| 88 | if (error) | ||
| 89 | goto Finish; | ||
| 90 | |||
| 91 | error = usermodehelper_disable(); | ||
| 92 | if (error) | ||
| 93 | goto Finish; | ||
| 94 | |||
| 95 | error = suspend_freeze_processes(); | ||
| 96 | if (!error) | ||
| 97 | return 0; | ||
| 98 | |||
| 99 | suspend_thaw_processes(); | ||
| 100 | usermodehelper_enable(); | ||
| 101 | Finish: | ||
| 102 | pm_notifier_call_chain(PM_POST_SUSPEND); | ||
| 103 | pm_restore_console(); | ||
| 104 | return error; | ||
| 105 | } | ||
| 106 | |||
| 107 | /* default implementation */ | ||
| 108 | void __attribute__ ((weak)) arch_suspend_disable_irqs(void) | ||
| 109 | { | ||
| 110 | local_irq_disable(); | ||
| 111 | } | ||
| 112 | |||
| 113 | /* default implementation */ | ||
| 114 | void __attribute__ ((weak)) arch_suspend_enable_irqs(void) | ||
| 115 | { | ||
| 116 | local_irq_enable(); | ||
| 117 | } | ||
| 118 | |||
| 119 | /** | ||
| 120 | * suspend_enter - enter the desired system sleep state. | ||
| 121 | * @state: state to enter | ||
| 122 | * | ||
| 123 | * This function should be called after devices have been suspended. | ||
| 124 | */ | ||
| 125 | static int suspend_enter(suspend_state_t state) | ||
| 126 | { | ||
| 127 | int error; | ||
| 128 | |||
| 129 | if (suspend_ops->prepare) { | ||
| 130 | error = suspend_ops->prepare(); | ||
| 131 | if (error) | ||
| 132 | return error; | ||
| 133 | } | ||
| 134 | |||
| 135 | error = dpm_suspend_noirq(PMSG_SUSPEND); | ||
| 136 | if (error) { | ||
| 137 | printk(KERN_ERR "PM: Some devices failed to power down\n"); | ||
| 138 | goto Platfrom_finish; | ||
| 139 | } | ||
| 140 | |||
| 141 | if (suspend_ops->prepare_late) { | ||
| 142 | error = suspend_ops->prepare_late(); | ||
| 143 | if (error) | ||
| 144 | goto Power_up_devices; | ||
| 145 | } | ||
| 146 | |||
| 147 | if (suspend_test(TEST_PLATFORM)) | ||
| 148 | goto Platform_wake; | ||
| 149 | |||
| 150 | error = disable_nonboot_cpus(); | ||
| 151 | if (error || suspend_test(TEST_CPUS)) | ||
| 152 | goto Enable_cpus; | ||
| 153 | |||
| 154 | arch_suspend_disable_irqs(); | ||
| 155 | BUG_ON(!irqs_disabled()); | ||
| 156 | |||
| 157 | error = sysdev_suspend(PMSG_SUSPEND); | ||
| 158 | if (!error) { | ||
| 159 | if (!suspend_test(TEST_CORE)) | ||
| 160 | error = suspend_ops->enter(state); | ||
| 161 | sysdev_resume(); | ||
| 162 | } | ||
| 163 | |||
| 164 | arch_suspend_enable_irqs(); | ||
| 165 | BUG_ON(irqs_disabled()); | ||
| 166 | |||
| 167 | Enable_cpus: | ||
| 168 | enable_nonboot_cpus(); | ||
| 169 | |||
| 170 | Platform_wake: | ||
| 171 | if (suspend_ops->wake) | ||
| 172 | suspend_ops->wake(); | ||
| 173 | |||
| 174 | Power_up_devices: | ||
| 175 | dpm_resume_noirq(PMSG_RESUME); | ||
| 176 | |||
| 177 | Platfrom_finish: | ||
| 178 | if (suspend_ops->finish) | ||
| 179 | suspend_ops->finish(); | ||
| 180 | |||
| 181 | return error; | ||
| 182 | } | ||
| 183 | |||
| 184 | /** | ||
| 185 | * suspend_devices_and_enter - suspend devices and enter the desired system | ||
| 186 | * sleep state. | ||
| 187 | * @state: state to enter | ||
| 188 | */ | ||
| 189 | int suspend_devices_and_enter(suspend_state_t state) | ||
| 190 | { | ||
| 191 | int error; | ||
| 192 | |||
| 193 | if (!suspend_ops) | ||
| 194 | return -ENOSYS; | ||
| 195 | |||
| 196 | if (suspend_ops->begin) { | ||
| 197 | error = suspend_ops->begin(state); | ||
| 198 | if (error) | ||
| 199 | goto Close; | ||
| 200 | } | ||
| 201 | suspend_console(); | ||
| 202 | suspend_test_start(); | ||
| 203 | error = dpm_suspend_start(PMSG_SUSPEND); | ||
| 204 | if (error) { | ||
| 205 | printk(KERN_ERR "PM: Some devices failed to suspend\n"); | ||
| 206 | goto Recover_platform; | ||
| 207 | } | ||
| 208 | suspend_test_finish("suspend devices"); | ||
| 209 | if (suspend_test(TEST_DEVICES)) | ||
| 210 | goto Recover_platform; | ||
| 211 | |||
| 212 | suspend_enter(state); | ||
| 213 | |||
| 214 | Resume_devices: | ||
| 215 | suspend_test_start(); | ||
| 216 | dpm_resume_end(PMSG_RESUME); | ||
| 217 | suspend_test_finish("resume devices"); | ||
| 218 | resume_console(); | ||
| 219 | Close: | ||
| 220 | if (suspend_ops->end) | ||
| 221 | suspend_ops->end(); | ||
| 222 | return error; | ||
| 223 | |||
| 224 | Recover_platform: | ||
| 225 | if (suspend_ops->recover) | ||
| 226 | suspend_ops->recover(); | ||
| 227 | goto Resume_devices; | ||
| 228 | } | ||
| 229 | |||
| 230 | /** | ||
| 231 | * suspend_finish - Do final work before exiting suspend sequence. | ||
| 232 | * | ||
| 233 | * Call platform code to clean up, restart processes, and free the | ||
| 234 | * console that we've allocated. This is not called for suspend-to-disk. | ||
| 235 | */ | ||
| 236 | static void suspend_finish(void) | ||
| 237 | { | ||
| 238 | suspend_thaw_processes(); | ||
| 239 | usermodehelper_enable(); | ||
| 240 | pm_notifier_call_chain(PM_POST_SUSPEND); | ||
| 241 | pm_restore_console(); | ||
| 242 | } | ||
| 243 | |||
| 244 | /** | ||
| 245 | * enter_state - Do common work of entering low-power state. | ||
| 246 | * @state: pm_state structure for state we're entering. | ||
| 247 | * | ||
| 248 | * Make sure we're the only ones trying to enter a sleep state. Fail | ||
| 249 | * if someone has beat us to it, since we don't want anything weird to | ||
| 250 | * happen when we wake up. | ||
| 251 | * Then, do the setup for suspend, enter the state, and cleaup (after | ||
| 252 | * we've woken up). | ||
| 253 | */ | ||
| 254 | int enter_state(suspend_state_t state) | ||
| 255 | { | ||
| 256 | int error; | ||
| 257 | |||
| 258 | if (!valid_state(state)) | ||
| 259 | return -ENODEV; | ||
| 260 | |||
| 261 | if (!mutex_trylock(&pm_mutex)) | ||
| 262 | return -EBUSY; | ||
| 263 | |||
| 264 | printk(KERN_INFO "PM: Syncing filesystems ... "); | ||
| 265 | sys_sync(); | ||
| 266 | printk("done.\n"); | ||
| 267 | |||
| 268 | pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]); | ||
| 269 | error = suspend_prepare(); | ||
| 270 | if (error) | ||
| 271 | goto Unlock; | ||
| 272 | |||
| 273 | if (suspend_test(TEST_FREEZER)) | ||
| 274 | goto Finish; | ||
| 275 | |||
| 276 | pr_debug("PM: Entering %s sleep\n", pm_states[state]); | ||
| 277 | error = suspend_devices_and_enter(state); | ||
| 278 | |||
| 279 | Finish: | ||
| 280 | pr_debug("PM: Finishing wakeup.\n"); | ||
| 281 | suspend_finish(); | ||
| 282 | Unlock: | ||
| 283 | mutex_unlock(&pm_mutex); | ||
| 284 | return error; | ||
| 285 | } | ||
| 286 | |||
| 287 | /** | ||
| 288 | * pm_suspend - Externally visible function for suspending system. | ||
| 289 | * @state: Enumerated value of state to enter. | ||
| 290 | * | ||
| 291 | * Check that the requested state is within range, then enter it | ||
| 292 | * via enter_state() above. | ||
| 293 | */ | ||
| 294 | int pm_suspend(suspend_state_t state) | ||
| 295 | { | ||
| 296 | if (state > PM_SUSPEND_ON && state <= PM_SUSPEND_MAX) | ||
| 297 | return enter_state(state); | ||
| 298 | return -EINVAL; | ||
| 299 | } | ||
| 300 | EXPORT_SYMBOL(pm_suspend); | ||
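The sequencing above (begin, dpm_suspend_start, suspend_enter, dpm_resume_end, end) is driven entirely through the registered platform_suspend_ops. The following is a minimal, hypothetical sketch of how a platform hooks into it; everything named "demo" is invented for illustration, while suspend_set_ops() and suspend_valid_only_mem() are the kernel interfaces declared in <linux/suspend.h> at this point in the tree.

    /*
     * Hypothetical platform glue: register the callbacks that
     * suspend_devices_and_enter()/suspend_enter() above invoke.
     * Only .enter is mandatory; .valid tells valid_state() which
     * sleep states to accept.
     */
    #include <linux/init.h>
    #include <linux/suspend.h>

    static int demo_suspend_enter(suspend_state_t state)
    {
            /* Called by suspend_enter() with IRQs disabled; put the SoC to sleep here. */
            return 0;
    }

    static struct platform_suspend_ops demo_suspend_ops = {
            .valid = suspend_valid_only_mem,        /* advertise only "mem" */
            .enter = demo_suspend_enter,            /* the one mandatory hook */
    };

    static int __init demo_pm_init(void)
    {
            suspend_set_ops(&demo_suspend_ops);
            return 0;
    }
    late_initcall(demo_pm_init);

With such ops registered, a write of "mem" to /sys/power/state (or an in-kernel call to pm_suspend(PM_SUSPEND_MEM)) runs the enter_state() sequence above.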
diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c new file mode 100644 index 000000000000..17d8bb1acf9c --- /dev/null +++ b/kernel/power/suspend_test.c | |||
| @@ -0,0 +1,187 @@ | |||
| 1 | /* | ||
| 2 | * kernel/power/suspend_test.c - Suspend to RAM and standby test facility. | ||
| 3 | * | ||
| 4 | * Copyright (c) 2009 Pavel Machek <pavel@ucw.cz> | ||
| 5 | * | ||
| 6 | * This file is released under the GPLv2. | ||
| 7 | */ | ||
| 8 | |||
| 9 | #include <linux/init.h> | ||
| 10 | #include <linux/rtc.h> | ||
| 11 | |||
| 12 | #include "power.h" | ||
| 13 | |||
| 14 | /* | ||
| 15 | * We test the system suspend code by setting an RTC wakealarm a short | ||
| 16 | * time in the future, then suspending. Suspending the devices won't | ||
| 17 | * normally take long ... some systems only need a few milliseconds. | ||
| 18 | * | ||
| 19 | * The time it takes is system-specific though, so when we test this | ||
| 20 | * during system bootup we allow a LOT of time. | ||
| 21 | */ | ||
| 22 | #define TEST_SUSPEND_SECONDS 5 | ||
| 23 | |||
| 24 | static unsigned long suspend_test_start_time; | ||
| 25 | |||
| 26 | void suspend_test_start(void) | ||
| 27 | { | ||
| 28 | /* FIXME Use better timebase than "jiffies", ideally a clocksource. | ||
| 29 | * What we want is a hardware counter that will work correctly even | ||
| 30 | * during the irqs-are-off stages of the suspend/resume cycle... | ||
| 31 | */ | ||
| 32 | suspend_test_start_time = jiffies; | ||
| 33 | } | ||
| 34 | |||
| 35 | void suspend_test_finish(const char *label) | ||
| 36 | { | ||
| 37 | long nj = jiffies - suspend_test_start_time; | ||
| 38 | unsigned msec; | ||
| 39 | |||
| 40 | msec = jiffies_to_msecs(abs(nj)); | ||
| 41 | pr_info("PM: %s took %d.%03d seconds\n", label, | ||
| 42 | msec / 1000, msec % 1000); | ||
| 43 | |||
| 44 | /* Warning on suspend means the RTC alarm period needs to be | ||
| 45 | * larger -- the system was sooo slooowwww to suspend that the | ||
| 46 | * alarm (should have) fired before the system went to sleep! | ||
| 47 | * | ||
| 48 | * Warning on either suspend or resume also means the system | ||
| 49 | * has some performance issues. The stack dump of a WARN_ON | ||
| 50 | * is more likely to get the right attention than a printk... | ||
| 51 | */ | ||
| 52 | WARN(msec > (TEST_SUSPEND_SECONDS * 1000), "Component: %s\n", label); | ||
| 53 | } | ||
| 54 | |||
| 55 | /* | ||
| 56 | * To test system suspend, we need a hands-off mechanism to resume the | ||
| 57 | * system. RTCs wake alarms are a common self-contained mechanism. | ||
| 58 | */ | ||
| 59 | |||
| 60 | static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state) | ||
| 61 | { | ||
| 62 | static char err_readtime[] __initdata = | ||
| 63 | KERN_ERR "PM: can't read %s time, err %d\n"; | ||
| 64 | static char err_wakealarm [] __initdata = | ||
| 65 | KERN_ERR "PM: can't set %s wakealarm, err %d\n"; | ||
| 66 | static char err_suspend[] __initdata = | ||
| 67 | KERN_ERR "PM: suspend test failed, error %d\n"; | ||
| 68 | static char info_test[] __initdata = | ||
| 69 | KERN_INFO "PM: test RTC wakeup from '%s' suspend\n"; | ||
| 70 | |||
| 71 | unsigned long now; | ||
| 72 | struct rtc_wkalrm alm; | ||
| 73 | int status; | ||
| 74 | |||
| 75 | /* this may fail if the RTC hasn't been initialized */ | ||
| 76 | status = rtc_read_time(rtc, &alm.time); | ||
| 77 | if (status < 0) { | ||
| 78 | printk(err_readtime, dev_name(&rtc->dev), status); | ||
| 79 | return; | ||
| 80 | } | ||
| 81 | rtc_tm_to_time(&alm.time, &now); | ||
| 82 | |||
| 83 | memset(&alm, 0, sizeof alm); | ||
| 84 | rtc_time_to_tm(now + TEST_SUSPEND_SECONDS, &alm.time); | ||
| 85 | alm.enabled = true; | ||
| 86 | |||
| 87 | status = rtc_set_alarm(rtc, &alm); | ||
| 88 | if (status < 0) { | ||
| 89 | printk(err_wakealarm, dev_name(&rtc->dev), status); | ||
| 90 | return; | ||
| 91 | } | ||
| 92 | |||
| 93 | if (state == PM_SUSPEND_MEM) { | ||
| 94 | printk(info_test, pm_states[state]); | ||
| 95 | status = pm_suspend(state); | ||
| 96 | if (status == -ENODEV) | ||
| 97 | state = PM_SUSPEND_STANDBY; | ||
| 98 | } | ||
| 99 | if (state == PM_SUSPEND_STANDBY) { | ||
| 100 | printk(info_test, pm_states[state]); | ||
| 101 | status = pm_suspend(state); | ||
| 102 | } | ||
| 103 | if (status < 0) | ||
| 104 | printk(err_suspend, status); | ||
| 105 | |||
| 106 | /* Some platforms can't detect that the alarm triggered the | ||
| 107 | * wakeup, or (accordingly) disable it afterwards. | ||
| 108 | * It's supposed to give oneshot behavior; cope. | ||
| 109 | */ | ||
| 110 | alm.enabled = false; | ||
| 111 | rtc_set_alarm(rtc, &alm); | ||
| 112 | } | ||
| 113 | |||
| 114 | static int __init has_wakealarm(struct device *dev, void *name_ptr) | ||
| 115 | { | ||
| 116 | struct rtc_device *candidate = to_rtc_device(dev); | ||
| 117 | |||
| 118 | if (!candidate->ops->set_alarm) | ||
| 119 | return 0; | ||
| 120 | if (!device_may_wakeup(candidate->dev.parent)) | ||
| 121 | return 0; | ||
| 122 | |||
| 123 | *(const char **)name_ptr = dev_name(dev); | ||
| 124 | return 1; | ||
| 125 | } | ||
| 126 | |||
| 127 | /* | ||
| 128 | * Kernel options like "test_suspend=mem" force suspend/resume sanity tests | ||
| 129 | * at startup time. They're normally disabled, for faster boot and because | ||
| 130 | * we can't know which states really work on this particular system. | ||
| 131 | */ | ||
| 132 | static suspend_state_t test_state __initdata = PM_SUSPEND_ON; | ||
| 133 | |||
| 134 | static char warn_bad_state[] __initdata = | ||
| 135 | KERN_WARNING "PM: can't test '%s' suspend state\n"; | ||
| 136 | |||
| 137 | static int __init setup_test_suspend(char *value) | ||
| 138 | { | ||
| 139 | unsigned i; | ||
| 140 | |||
| 141 | /* "=mem" ==> "mem" */ | ||
| 142 | value++; | ||
| 143 | for (i = 0; i < PM_SUSPEND_MAX; i++) { | ||
| 144 | if (!pm_states[i]) | ||
| 145 | continue; | ||
| 146 | if (strcmp(pm_states[i], value) != 0) | ||
| 147 | continue; | ||
| 148 | test_state = (__force suspend_state_t) i; | ||
| 149 | return 0; | ||
| 150 | } | ||
| 151 | printk(warn_bad_state, value); | ||
| 152 | return 0; | ||
| 153 | } | ||
| 154 | __setup("test_suspend", setup_test_suspend); | ||
| 155 | |||
| 156 | static int __init test_suspend(void) | ||
| 157 | { | ||
| 158 | static char warn_no_rtc[] __initdata = | ||
| 159 | KERN_WARNING "PM: no wakealarm-capable RTC driver is ready\n"; | ||
| 160 | |||
| 161 | char *pony = NULL; | ||
| 162 | struct rtc_device *rtc = NULL; | ||
| 163 | |||
| 164 | /* PM is initialized by now; is that state testable? */ | ||
| 165 | if (test_state == PM_SUSPEND_ON) | ||
| 166 | goto done; | ||
| 167 | if (!valid_state(test_state)) { | ||
| 168 | printk(warn_bad_state, pm_states[test_state]); | ||
| 169 | goto done; | ||
| 170 | } | ||
| 171 | |||
| 172 | /* RTCs have initialized by now too ... can we use one? */ | ||
| 173 | class_find_device(rtc_class, NULL, &pony, has_wakealarm); | ||
| 174 | if (pony) | ||
| 175 | rtc = rtc_class_open(pony); | ||
| 176 | if (!rtc) { | ||
| 177 | printk(warn_no_rtc); | ||
| 178 | goto done; | ||
| 179 | } | ||
| 180 | |||
| 181 | /* go for it */ | ||
| 182 | test_wakealarm(rtc, test_state); | ||
| 183 | rtc_class_close(rtc); | ||
| 184 | done: | ||
| 185 | return 0; | ||
| 186 | } | ||
| 187 | late_initcall(test_suspend); | ||
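Booting with test_suspend=mem runs this check once at late_initcall time. After boot, the same wakealarm-then-suspend cycle can be driven from userspace; below is a rough sketch under the assumptions that an rtc0 device with a wakeup-capable parent exists, that the RTC runs in UTC, that no alarm is already pending, and that the platform accepts the "mem" state (run as root):

    /* Userspace analogue of test_wakealarm(): arm rtc0, then suspend to RAM. */
    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
            FILE *f;

            /* Arm a wakealarm ten seconds from now (seconds since the epoch). */
            f = fopen("/sys/class/rtc/rtc0/wakealarm", "w");
            if (!f) {
                    perror("wakealarm");
                    return 1;
            }
            fprintf(f, "%ld\n", (long)time(NULL) + 10);
            if (fclose(f)) {        /* the write is actually flushed here */
                    perror("wakealarm");
                    return 1;
            }

            /* Roughly what the kernel test triggers via pm_suspend(PM_SUSPEND_MEM). */
            f = fopen("/sys/power/state", "w");
            if (!f) {
                    perror("state");
                    return 1;
            }
            fprintf(f, "mem\n");
            if (fclose(f)) {
                    perror("state");
                    return 1;
            }
            return 0;
    }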
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c index 78c35047586d..6a07f4dbf2f8 100644 --- a/kernel/power/swsusp.c +++ b/kernel/power/swsusp.c | |||
| @@ -55,14 +55,6 @@ | |||
| 55 | 55 | ||
| 56 | #include "power.h" | 56 | #include "power.h" |
| 57 | 57 | ||
| 58 | /* | ||
| 59 | * Preferred image size in bytes (tunable via /sys/power/image_size). | ||
| 60 | * When it is set to N, swsusp will do its best to ensure the image | ||
| 61 | * size will not exceed N bytes, but if that is impossible, it will | ||
| 62 | * try to create the smallest image possible. | ||
| 63 | */ | ||
| 64 | unsigned long image_size = 500 * 1024 * 1024; | ||
| 65 | |||
| 66 | int in_suspend __nosavedata = 0; | 58 | int in_suspend __nosavedata = 0; |
| 67 | 59 | ||
| 68 | /** | 60 | /** |
| @@ -194,193 +186,3 @@ void swsusp_show_speed(struct timeval *start, struct timeval *stop, | |||
| 194 | centisecs / 100, centisecs % 100, | 186 | centisecs / 100, centisecs % 100, |
| 195 | kps / 1000, (kps % 1000) / 10); | 187 | kps / 1000, (kps % 1000) / 10); |
| 196 | } | 188 | } |
| 197 | |||
| 198 | /** | ||
| 199 | * swsusp_shrink_memory - Try to free as much memory as needed | ||
| 200 | * | ||
| 201 | * ... but do not OOM-kill anyone | ||
| 202 | * | ||
| 203 | * Notice: all userland should be stopped before it is called, or | ||
| 204 | * livelock is possible. | ||
| 205 | */ | ||
| 206 | |||
| 207 | #define SHRINK_BITE 10000 | ||
| 208 | static inline unsigned long __shrink_memory(long tmp) | ||
| 209 | { | ||
| 210 | if (tmp > SHRINK_BITE) | ||
| 211 | tmp = SHRINK_BITE; | ||
| 212 | return shrink_all_memory(tmp); | ||
| 213 | } | ||
| 214 | |||
| 215 | int swsusp_shrink_memory(void) | ||
| 216 | { | ||
| 217 | long tmp; | ||
| 218 | struct zone *zone; | ||
| 219 | unsigned long pages = 0; | ||
| 220 | unsigned int i = 0; | ||
| 221 | char *p = "-\\|/"; | ||
| 222 | struct timeval start, stop; | ||
| 223 | |||
| 224 | printk(KERN_INFO "PM: Shrinking memory... "); | ||
| 225 | do_gettimeofday(&start); | ||
| 226 | do { | ||
| 227 | long size, highmem_size; | ||
| 228 | |||
| 229 | highmem_size = count_highmem_pages(); | ||
| 230 | size = count_data_pages() + PAGES_FOR_IO + SPARE_PAGES; | ||
| 231 | tmp = size; | ||
| 232 | size += highmem_size; | ||
| 233 | for_each_populated_zone(zone) { | ||
| 234 | tmp += snapshot_additional_pages(zone); | ||
| 235 | if (is_highmem(zone)) { | ||
| 236 | highmem_size -= | ||
| 237 | zone_page_state(zone, NR_FREE_PAGES); | ||
| 238 | } else { | ||
| 239 | tmp -= zone_page_state(zone, NR_FREE_PAGES); | ||
| 240 | tmp += zone->lowmem_reserve[ZONE_NORMAL]; | ||
| 241 | } | ||
| 242 | } | ||
| 243 | |||
| 244 | if (highmem_size < 0) | ||
| 245 | highmem_size = 0; | ||
| 246 | |||
| 247 | tmp += highmem_size; | ||
| 248 | if (tmp > 0) { | ||
| 249 | tmp = __shrink_memory(tmp); | ||
| 250 | if (!tmp) | ||
| 251 | return -ENOMEM; | ||
| 252 | pages += tmp; | ||
| 253 | } else if (size > image_size / PAGE_SIZE) { | ||
| 254 | tmp = __shrink_memory(size - (image_size / PAGE_SIZE)); | ||
| 255 | pages += tmp; | ||
| 256 | } | ||
| 257 | printk("\b%c", p[i++%4]); | ||
| 258 | } while (tmp > 0); | ||
| 259 | do_gettimeofday(&stop); | ||
| 260 | printk("\bdone (%lu pages freed)\n", pages); | ||
| 261 | swsusp_show_speed(&start, &stop, pages, "Freed"); | ||
| 262 | |||
| 263 | return 0; | ||
| 264 | } | ||
| 265 | |||
| 266 | /* | ||
| 267 | * Platforms, like ACPI, may want us to save some memory used by them during | ||
| 268 | * hibernation and to restore the contents of this memory during the subsequent | ||
| 269 | * resume. The code below implements a mechanism allowing us to do that. | ||
| 270 | */ | ||
| 271 | |||
| 272 | struct nvs_page { | ||
| 273 | unsigned long phys_start; | ||
| 274 | unsigned int size; | ||
| 275 | void *kaddr; | ||
| 276 | void *data; | ||
| 277 | struct list_head node; | ||
| 278 | }; | ||
| 279 | |||
| 280 | static LIST_HEAD(nvs_list); | ||
| 281 | |||
| 282 | /** | ||
| 283 | * hibernate_nvs_register - register platform NVS memory region to save | ||
| 284 | * @start - physical address of the region | ||
| 285 | * @size - size of the region | ||
| 286 | * | ||
| 287 | * The NVS region need not be page-aligned (both ends) and we arrange | ||
| 288 | * things so that the data from page-aligned addresses in this region will | ||
| 289 | * be copied into separate RAM pages. | ||
| 290 | */ | ||
| 291 | int hibernate_nvs_register(unsigned long start, unsigned long size) | ||
| 292 | { | ||
| 293 | struct nvs_page *entry, *next; | ||
| 294 | |||
| 295 | while (size > 0) { | ||
| 296 | unsigned int nr_bytes; | ||
| 297 | |||
| 298 | entry = kzalloc(sizeof(struct nvs_page), GFP_KERNEL); | ||
| 299 | if (!entry) | ||
| 300 | goto Error; | ||
| 301 | |||
| 302 | list_add_tail(&entry->node, &nvs_list); | ||
| 303 | entry->phys_start = start; | ||
| 304 | nr_bytes = PAGE_SIZE - (start & ~PAGE_MASK); | ||
| 305 | entry->size = (size < nr_bytes) ? size : nr_bytes; | ||
| 306 | |||
| 307 | start += entry->size; | ||
| 308 | size -= entry->size; | ||
| 309 | } | ||
| 310 | return 0; | ||
| 311 | |||
| 312 | Error: | ||
| 313 | list_for_each_entry_safe(entry, next, &nvs_list, node) { | ||
| 314 | list_del(&entry->node); | ||
| 315 | kfree(entry); | ||
| 316 | } | ||
| 317 | return -ENOMEM; | ||
| 318 | } | ||
| 319 | |||
| 320 | /** | ||
| 321 | * hibernate_nvs_free - free data pages allocated for saving NVS regions | ||
| 322 | */ | ||
| 323 | void hibernate_nvs_free(void) | ||
| 324 | { | ||
| 325 | struct nvs_page *entry; | ||
| 326 | |||
| 327 | list_for_each_entry(entry, &nvs_list, node) | ||
| 328 | if (entry->data) { | ||
| 329 | free_page((unsigned long)entry->data); | ||
| 330 | entry->data = NULL; | ||
| 331 | if (entry->kaddr) { | ||
| 332 | iounmap(entry->kaddr); | ||
| 333 | entry->kaddr = NULL; | ||
| 334 | } | ||
| 335 | } | ||
| 336 | } | ||
| 337 | |||
| 338 | /** | ||
| 339 | * hibernate_nvs_alloc - allocate memory necessary for saving NVS regions | ||
| 340 | */ | ||
| 341 | int hibernate_nvs_alloc(void) | ||
| 342 | { | ||
| 343 | struct nvs_page *entry; | ||
| 344 | |||
| 345 | list_for_each_entry(entry, &nvs_list, node) { | ||
| 346 | entry->data = (void *)__get_free_page(GFP_KERNEL); | ||
| 347 | if (!entry->data) { | ||
| 348 | hibernate_nvs_free(); | ||
| 349 | return -ENOMEM; | ||
| 350 | } | ||
| 351 | } | ||
| 352 | return 0; | ||
| 353 | } | ||
| 354 | |||
| 355 | /** | ||
| 356 | * hibernate_nvs_save - save NVS memory regions | ||
| 357 | */ | ||
| 358 | void hibernate_nvs_save(void) | ||
| 359 | { | ||
| 360 | struct nvs_page *entry; | ||
| 361 | |||
| 362 | printk(KERN_INFO "PM: Saving platform NVS memory\n"); | ||
| 363 | |||
| 364 | list_for_each_entry(entry, &nvs_list, node) | ||
| 365 | if (entry->data) { | ||
| 366 | entry->kaddr = ioremap(entry->phys_start, entry->size); | ||
| 367 | memcpy(entry->data, entry->kaddr, entry->size); | ||
| 368 | } | ||
| 369 | } | ||
| 370 | |||
| 371 | /** | ||
| 372 | * hibernate_nvs_restore - restore NVS memory regions | ||
| 373 | * | ||
| 374 | * This function is going to be called with interrupts disabled, so it | ||
| 375 | * cannot iounmap the virtual addresses used to access the NVS region. | ||
| 376 | */ | ||
| 377 | void hibernate_nvs_restore(void) | ||
| 378 | { | ||
| 379 | struct nvs_page *entry; | ||
| 380 | |||
| 381 | printk(KERN_INFO "PM: Restoring platform NVS memory\n"); | ||
| 382 | |||
| 383 | list_for_each_entry(entry, &nvs_list, node) | ||
| 384 | if (entry->data) | ||
| 385 | memcpy(entry->kaddr, entry->data, entry->size); | ||
| 386 | } | ||
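For context, the NVS code removed here is relocated to kernel/power/hibernate_nvs.c elsewhere in this series; the ACPI sleep code is the expected in-tree user, registering the firmware's NVS region and then driving the alloc/save/restore/free cycle around hibernation. A hedged sketch of that calling sequence follows; the example_* wrappers and the region bounds are placeholders, and only the hibernate_nvs_* calls (declared in <linux/suspend.h>) are the real interface.

    /* Illustrative lifecycle for the hibernate_nvs_* helpers above. */
    #include <linux/suspend.h>

    int example_nvs_setup(unsigned long start, unsigned long size)
    {
            /* Once, when the platform NVS region is discovered (e.g. from e820). */
            return hibernate_nvs_register(start, size);
    }

    int example_pre_hibernate(void)
    {
            int error;

            /* Before the image is created: back the NVS data up into RAM pages. */
            error = hibernate_nvs_alloc();
            if (error)
                    return error;
            hibernate_nvs_save();
            return 0;
    }

    void example_post_restore(void)
    {
            /* Right after the image is restored, with interrupts still disabled. */
            hibernate_nvs_restore();
            /* Later, once normal context is available again, drop the backup pages. */
            hibernate_nvs_free();
    }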
