diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-12-08 11:07:16 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-12-08 11:07:16 -0500 |
| commit | dad3de7d0090280f44ff27131ed2878f1ab6ddad (patch) | |
| tree | f887c721761ce845037d96b59542198294a5d3a5 | |
| parent | ed9216c1717a3f3738a77908aff78995ea69e7ff (diff) | |
| parent | 7a1a8eb58a2c6cd819d17332c5a2c369203635d5 (diff) | |
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6:
PM: Add flag for devices capable of generating run-time wake-up events
PM / Runtime: Remove unnecessary braces in __pm_runtime_set_status()
PM / Runtime: Make documentation of runtime_idle() agree with the code
PM / Runtime: Ensure timer_expires is nonzero in pm_schedule_suspend()
PM / Runtime: Use deferred_resume flag in pm_request_resume
PM / Runtime: Export the PM runtime workqueue
PM / Runtime: Fix lockdep warning in __pm_runtime_set_status()
PM / Hibernate: Swap, use KERN_CONT
PM / Hibernate: Shift remaining code from swsusp.c to hibernate.c
PM / Hibernate: Move swap functions to kernel/power/swap.c.
PM / freezer: Don't get over-anxious while waiting
| -rw-r--r-- | Documentation/power/runtime_pm.txt | 12 | ||||
| -rw-r--r-- | drivers/base/power/runtime.c | 23 | ||||
| -rw-r--r-- | include/linux/pm.h | 8 | ||||
| -rw-r--r-- | include/linux/pm_runtime.h | 12 | ||||
| -rw-r--r-- | kernel/power/Makefile | 2 | ||||
| -rw-r--r-- | kernel/power/hibernate.c | 30 | ||||
| -rw-r--r-- | kernel/power/main.c | 1 | ||||
| -rw-r--r-- | kernel/power/process.c | 14 | ||||
| -rw-r--r-- | kernel/power/swap.c | 107 | ||||
| -rw-r--r-- | kernel/power/swsusp.c | 130 |
10 files changed, 184 insertions, 155 deletions
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt index f49a33b704d2..4a3109b28847 100644 --- a/Documentation/power/runtime_pm.txt +++ b/Documentation/power/runtime_pm.txt | |||
| @@ -38,7 +38,7 @@ struct dev_pm_ops { | |||
| 38 | ... | 38 | ... |
| 39 | int (*runtime_suspend)(struct device *dev); | 39 | int (*runtime_suspend)(struct device *dev); |
| 40 | int (*runtime_resume)(struct device *dev); | 40 | int (*runtime_resume)(struct device *dev); |
| 41 | void (*runtime_idle)(struct device *dev); | 41 | int (*runtime_idle)(struct device *dev); |
| 42 | ... | 42 | ... |
| 43 | }; | 43 | }; |
| 44 | 44 | ||
| @@ -71,9 +71,9 @@ what to do to handle the device). | |||
| 71 | purpose). | 71 | purpose). |
| 72 | 72 | ||
| 73 | In particular, if the driver requires remote wakeup capability for proper | 73 | In particular, if the driver requires remote wakeup capability for proper |
| 74 | functioning and device_may_wakeup() returns 'false' for the device, then | 74 | functioning and device_run_wake() returns 'false' for the device, then |
| 75 | ->runtime_suspend() should return -EBUSY. On the other hand, if | 75 | ->runtime_suspend() should return -EBUSY. On the other hand, if |
| 76 | device_may_wakeup() returns 'true' for the device and the device is put | 76 | device_run_wake() returns 'true' for the device and the device is put |
| 77 | into a low power state during the execution of its bus type's | 77 | into a low power state during the execution of its bus type's |
| 78 | ->runtime_suspend(), it is expected that remote wake-up (i.e. hardware mechanism | 78 | ->runtime_suspend(), it is expected that remote wake-up (i.e. hardware mechanism |
| 79 | allowing the device to request a change of its power state, such as PCI PME) | 79 | allowing the device to request a change of its power state, such as PCI PME) |
| @@ -114,7 +114,8 @@ The action performed by a bus type's ->runtime_idle() callback is totally | |||
| 114 | dependent on the bus type in question, but the expected and recommended action | 114 | dependent on the bus type in question, but the expected and recommended action |
| 115 | is to check if the device can be suspended (i.e. if all of the conditions | 115 | is to check if the device can be suspended (i.e. if all of the conditions |
| 116 | necessary for suspending the device are satisfied) and to queue up a suspend | 116 | necessary for suspending the device are satisfied) and to queue up a suspend |
| 117 | request for the device in that case. | 117 | request for the device in that case. The value returned by this callback is |
| 118 | ignored by the PM core. | ||
| 118 | 119 | ||
| 119 | The helper functions provided by the PM core, described in Section 4, guarantee | 120 | The helper functions provided by the PM core, described in Section 4, guarantee |
| 120 | that the following constraints are met with respect to the bus type's run-time | 121 | that the following constraints are met with respect to the bus type's run-time |
| @@ -214,6 +215,9 @@ defined in include/linux/pm.h: | |||
| 214 | being executed for that device and it is not practical to wait for the | 215 | being executed for that device and it is not practical to wait for the |
| 215 | suspend to complete; means "start a resume as soon as you've suspended" | 216 | suspend to complete; means "start a resume as soon as you've suspended" |
| 216 | 217 | ||
| 218 | unsigned int run_wake; | ||
| 219 | - set if the device is capable of generating run-time wake-up events | ||
| 220 | |||
| 217 | enum rpm_status runtime_status; | 221 | enum rpm_status runtime_status; |
| 218 | - the run-time PM status of the device; this field's initial value is | 222 | - the run-time PM status of the device; this field's initial value is |
| 219 | RPM_SUSPENDED, which means that each device is initially regarded by the | 223 | RPM_SUSPENDED, which means that each device is initially regarded by the |
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 846d89e3d122..5a01ecef4af3 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c | |||
| @@ -185,6 +185,7 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq) | |||
| 185 | } | 185 | } |
| 186 | 186 | ||
| 187 | dev->power.runtime_status = RPM_SUSPENDING; | 187 | dev->power.runtime_status = RPM_SUSPENDING; |
| 188 | dev->power.deferred_resume = false; | ||
| 188 | 189 | ||
| 189 | if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) { | 190 | if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) { |
| 190 | spin_unlock_irq(&dev->power.lock); | 191 | spin_unlock_irq(&dev->power.lock); |
| @@ -200,7 +201,6 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq) | |||
| 200 | if (retval) { | 201 | if (retval) { |
| 201 | dev->power.runtime_status = RPM_ACTIVE; | 202 | dev->power.runtime_status = RPM_ACTIVE; |
| 202 | pm_runtime_cancel_pending(dev); | 203 | pm_runtime_cancel_pending(dev); |
| 203 | dev->power.deferred_resume = false; | ||
| 204 | 204 | ||
| 205 | if (retval == -EAGAIN || retval == -EBUSY) { | 205 | if (retval == -EAGAIN || retval == -EBUSY) { |
| 206 | notify = true; | 206 | notify = true; |
| @@ -217,7 +217,6 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq) | |||
| 217 | wake_up_all(&dev->power.wait_queue); | 217 | wake_up_all(&dev->power.wait_queue); |
| 218 | 218 | ||
| 219 | if (dev->power.deferred_resume) { | 219 | if (dev->power.deferred_resume) { |
| 220 | dev->power.deferred_resume = false; | ||
| 221 | __pm_runtime_resume(dev, false); | 220 | __pm_runtime_resume(dev, false); |
| 222 | retval = -EAGAIN; | 221 | retval = -EAGAIN; |
| 223 | goto out; | 222 | goto out; |
| @@ -626,6 +625,8 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay) | |||
| 626 | goto out; | 625 | goto out; |
| 627 | 626 | ||
| 628 | dev->power.timer_expires = jiffies + msecs_to_jiffies(delay); | 627 | dev->power.timer_expires = jiffies + msecs_to_jiffies(delay); |
| 628 | if (!dev->power.timer_expires) | ||
| 629 | dev->power.timer_expires = 1; | ||
| 629 | mod_timer(&dev->power.suspend_timer, dev->power.timer_expires); | 630 | mod_timer(&dev->power.suspend_timer, dev->power.timer_expires); |
| 630 | 631 | ||
| 631 | out: | 632 | out: |
| @@ -659,13 +660,17 @@ static int __pm_request_resume(struct device *dev) | |||
| 659 | 660 | ||
| 660 | pm_runtime_deactivate_timer(dev); | 661 | pm_runtime_deactivate_timer(dev); |
| 661 | 662 | ||
| 663 | if (dev->power.runtime_status == RPM_SUSPENDING) { | ||
| 664 | dev->power.deferred_resume = true; | ||
| 665 | return retval; | ||
| 666 | } | ||
| 662 | if (dev->power.request_pending) { | 667 | if (dev->power.request_pending) { |
| 663 | /* If non-resume request is pending, we can overtake it. */ | 668 | /* If non-resume request is pending, we can overtake it. */ |
| 664 | dev->power.request = retval ? RPM_REQ_NONE : RPM_REQ_RESUME; | 669 | dev->power.request = retval ? RPM_REQ_NONE : RPM_REQ_RESUME; |
| 665 | return retval; | 670 | return retval; |
| 666 | } else if (retval) { | ||
| 667 | return retval; | ||
| 668 | } | 671 | } |
| 672 | if (retval) | ||
| 673 | return retval; | ||
| 669 | 674 | ||
| 670 | dev->power.request = RPM_REQ_RESUME; | 675 | dev->power.request = RPM_REQ_RESUME; |
| 671 | dev->power.request_pending = true; | 676 | dev->power.request_pending = true; |
| @@ -777,7 +782,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status) | |||
| 777 | } | 782 | } |
| 778 | 783 | ||
| 779 | if (parent) { | 784 | if (parent) { |
| 780 | spin_lock(&parent->power.lock); | 785 | spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING); |
| 781 | 786 | ||
| 782 | /* | 787 | /* |
| 783 | * It is invalid to put an active child under a parent that is | 788 | * It is invalid to put an active child under a parent that is |
| @@ -786,12 +791,10 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status) | |||
| 786 | */ | 791 | */ |
| 787 | if (!parent->power.disable_depth | 792 | if (!parent->power.disable_depth |
| 788 | && !parent->power.ignore_children | 793 | && !parent->power.ignore_children |
| 789 | && parent->power.runtime_status != RPM_ACTIVE) { | 794 | && parent->power.runtime_status != RPM_ACTIVE) |
| 790 | error = -EBUSY; | 795 | error = -EBUSY; |
| 791 | } else { | 796 | else if (dev->power.runtime_status == RPM_SUSPENDED) |
| 792 | if (dev->power.runtime_status == RPM_SUSPENDED) | 797 | atomic_inc(&parent->power.child_count); |
| 793 | atomic_inc(&parent->power.child_count); | ||
| 794 | } | ||
| 795 | 798 | ||
| 796 | spin_unlock(&parent->power.lock); | 799 | spin_unlock(&parent->power.lock); |
| 797 | 800 | ||
diff --git a/include/linux/pm.h b/include/linux/pm.h index 3b7e04b95bd2..0d65934246af 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h | |||
| @@ -178,9 +178,10 @@ typedef struct pm_message { | |||
| 178 | * This need not mean that the device should be put into a low power state. | 178 | * This need not mean that the device should be put into a low power state. |
| 179 | * For example, if the device is behind a link which is about to be turned | 179 | * For example, if the device is behind a link which is about to be turned |
| 180 | * off, the device may remain at full power. If the device does go to low | 180 | * off, the device may remain at full power. If the device does go to low |
| 181 | * power and if device_may_wakeup(dev) is true, remote wake-up (i.e., a | 181 | * power and is capable of generating run-time wake-up events, remote |
| 182 | * hardware mechanism allowing the device to request a change of its power | 182 | * wake-up (i.e., a hardware mechanism allowing the device to request a |
| 183 | * state, such as PCI PME) should be enabled for it. | 183 | * change of its power state via a wake-up event, such as PCI PME) should |
| 184 | * be enabled for it. | ||
| 184 | * | 185 | * |
| 185 | * @runtime_resume: Put the device into the fully active state in response to a | 186 | * @runtime_resume: Put the device into the fully active state in response to a |
| 186 | * wake-up event generated by hardware or at the request of software. If | 187 | * wake-up event generated by hardware or at the request of software. If |
| @@ -428,6 +429,7 @@ struct dev_pm_info { | |||
| 428 | unsigned int idle_notification:1; | 429 | unsigned int idle_notification:1; |
| 429 | unsigned int request_pending:1; | 430 | unsigned int request_pending:1; |
| 430 | unsigned int deferred_resume:1; | 431 | unsigned int deferred_resume:1; |
| 432 | unsigned int run_wake:1; | ||
| 431 | enum rpm_request request; | 433 | enum rpm_request request; |
| 432 | enum rpm_status runtime_status; | 434 | enum rpm_status runtime_status; |
| 433 | int runtime_error; | 435 | int runtime_error; |
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 44087044910f..370ce0a6fe4a 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h | |||
| @@ -50,6 +50,16 @@ static inline void pm_runtime_put_noidle(struct device *dev) | |||
| 50 | atomic_add_unless(&dev->power.usage_count, -1, 0); | 50 | atomic_add_unless(&dev->power.usage_count, -1, 0); |
| 51 | } | 51 | } |
| 52 | 52 | ||
| 53 | static inline bool device_run_wake(struct device *dev) | ||
| 54 | { | ||
| 55 | return dev->power.run_wake; | ||
| 56 | } | ||
| 57 | |||
| 58 | static inline void device_set_run_wake(struct device *dev, bool enable) | ||
| 59 | { | ||
| 60 | dev->power.run_wake = enable; | ||
| 61 | } | ||
| 62 | |||
| 53 | #else /* !CONFIG_PM_RUNTIME */ | 63 | #else /* !CONFIG_PM_RUNTIME */ |
| 54 | 64 | ||
| 55 | static inline int pm_runtime_idle(struct device *dev) { return -ENOSYS; } | 65 | static inline int pm_runtime_idle(struct device *dev) { return -ENOSYS; } |
| @@ -73,6 +83,8 @@ static inline bool pm_children_suspended(struct device *dev) { return false; } | |||
| 73 | static inline void pm_suspend_ignore_children(struct device *dev, bool en) {} | 83 | static inline void pm_suspend_ignore_children(struct device *dev, bool en) {} |
| 74 | static inline void pm_runtime_get_noresume(struct device *dev) {} | 84 | static inline void pm_runtime_get_noresume(struct device *dev) {} |
| 75 | static inline void pm_runtime_put_noidle(struct device *dev) {} | 85 | static inline void pm_runtime_put_noidle(struct device *dev) {} |
| 86 | static inline bool device_run_wake(struct device *dev) { return false; } | ||
| 87 | static inline void device_set_run_wake(struct device *dev, bool enable) {} | ||
| 76 | 88 | ||
| 77 | #endif /* !CONFIG_PM_RUNTIME */ | 89 | #endif /* !CONFIG_PM_RUNTIME */ |
| 78 | 90 | ||
diff --git a/kernel/power/Makefile b/kernel/power/Makefile index c3b81c30e5d5..43191815f874 100644 --- a/kernel/power/Makefile +++ b/kernel/power/Makefile | |||
| @@ -8,7 +8,7 @@ obj-$(CONFIG_PM_SLEEP) += console.o | |||
| 8 | obj-$(CONFIG_FREEZER) += process.o | 8 | obj-$(CONFIG_FREEZER) += process.o |
| 9 | obj-$(CONFIG_SUSPEND) += suspend.o | 9 | obj-$(CONFIG_SUSPEND) += suspend.o |
| 10 | obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o | 10 | obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o |
| 11 | obj-$(CONFIG_HIBERNATION) += swsusp.o hibernate.o snapshot.o swap.o user.o | 11 | obj-$(CONFIG_HIBERNATION) += hibernate.o snapshot.o swap.o user.o |
| 12 | obj-$(CONFIG_HIBERNATION_NVS) += hibernate_nvs.o | 12 | obj-$(CONFIG_HIBERNATION_NVS) += hibernate_nvs.o |
| 13 | 13 | ||
| 14 | obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o | 14 | obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o |
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 04a9e90d248f..bbfe472d7524 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c | |||
| @@ -32,6 +32,7 @@ static int noresume = 0; | |||
| 32 | static char resume_file[256] = CONFIG_PM_STD_PARTITION; | 32 | static char resume_file[256] = CONFIG_PM_STD_PARTITION; |
| 33 | dev_t swsusp_resume_device; | 33 | dev_t swsusp_resume_device; |
| 34 | sector_t swsusp_resume_block; | 34 | sector_t swsusp_resume_block; |
| 35 | int in_suspend __nosavedata = 0; | ||
| 35 | 36 | ||
| 36 | enum { | 37 | enum { |
| 37 | HIBERNATION_INVALID, | 38 | HIBERNATION_INVALID, |
| @@ -202,6 +203,35 @@ static void platform_recover(int platform_mode) | |||
| 202 | } | 203 | } |
| 203 | 204 | ||
| 204 | /** | 205 | /** |
| 206 | * swsusp_show_speed - print the time elapsed between two events. | ||
| 207 | * @start: Starting event. | ||
| 208 | * @stop: Final event. | ||
| 209 | * @nr_pages - number of pages processed between @start and @stop | ||
| 210 | * @msg - introductory message to print | ||
| 211 | */ | ||
| 212 | |||
| 213 | void swsusp_show_speed(struct timeval *start, struct timeval *stop, | ||
| 214 | unsigned nr_pages, char *msg) | ||
| 215 | { | ||
| 216 | s64 elapsed_centisecs64; | ||
| 217 | int centisecs; | ||
| 218 | int k; | ||
| 219 | int kps; | ||
| 220 | |||
| 221 | elapsed_centisecs64 = timeval_to_ns(stop) - timeval_to_ns(start); | ||
| 222 | do_div(elapsed_centisecs64, NSEC_PER_SEC / 100); | ||
| 223 | centisecs = elapsed_centisecs64; | ||
| 224 | if (centisecs == 0) | ||
| 225 | centisecs = 1; /* avoid div-by-zero */ | ||
| 226 | k = nr_pages * (PAGE_SIZE / 1024); | ||
| 227 | kps = (k * 100) / centisecs; | ||
| 228 | printk(KERN_INFO "PM: %s %d kbytes in %d.%02d seconds (%d.%02d MB/s)\n", | ||
| 229 | msg, k, | ||
| 230 | centisecs / 100, centisecs % 100, | ||
| 231 | kps / 1000, (kps % 1000) / 10); | ||
| 232 | } | ||
| 233 | |||
| 234 | /** | ||
| 205 | * create_image - freeze devices that need to be frozen with interrupts | 235 | * create_image - freeze devices that need to be frozen with interrupts |
| 206 | * off, create the hibernation image and thaw those devices. Control | 236 | * off, create the hibernation image and thaw those devices. Control |
| 207 | * reappears in this routine after a restore. | 237 | * reappears in this routine after a restore. |
diff --git a/kernel/power/main.c b/kernel/power/main.c index 347d2cc88cd0..0998c7139053 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c | |||
| @@ -220,6 +220,7 @@ static struct attribute_group attr_group = { | |||
| 220 | 220 | ||
| 221 | #ifdef CONFIG_PM_RUNTIME | 221 | #ifdef CONFIG_PM_RUNTIME |
| 222 | struct workqueue_struct *pm_wq; | 222 | struct workqueue_struct *pm_wq; |
| 223 | EXPORT_SYMBOL_GPL(pm_wq); | ||
| 223 | 224 | ||
| 224 | static int __init pm_start_workqueue(void) | 225 | static int __init pm_start_workqueue(void) |
| 225 | { | 226 | { |
diff --git a/kernel/power/process.c b/kernel/power/process.c index cc2e55373b68..5ade1bdcf366 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
| 15 | #include <linux/syscalls.h> | 15 | #include <linux/syscalls.h> |
| 16 | #include <linux/freezer.h> | 16 | #include <linux/freezer.h> |
| 17 | #include <linux/delay.h> | ||
| 17 | 18 | ||
| 18 | /* | 19 | /* |
| 19 | * Timeout for stopping processes | 20 | * Timeout for stopping processes |
| @@ -41,7 +42,7 @@ static int try_to_freeze_tasks(bool sig_only) | |||
| 41 | do_gettimeofday(&start); | 42 | do_gettimeofday(&start); |
| 42 | 43 | ||
| 43 | end_time = jiffies + TIMEOUT; | 44 | end_time = jiffies + TIMEOUT; |
| 44 | do { | 45 | while (true) { |
| 45 | todo = 0; | 46 | todo = 0; |
| 46 | read_lock(&tasklist_lock); | 47 | read_lock(&tasklist_lock); |
| 47 | do_each_thread(g, p) { | 48 | do_each_thread(g, p) { |
| @@ -62,10 +63,15 @@ static int try_to_freeze_tasks(bool sig_only) | |||
| 62 | todo++; | 63 | todo++; |
| 63 | } while_each_thread(g, p); | 64 | } while_each_thread(g, p); |
| 64 | read_unlock(&tasklist_lock); | 65 | read_unlock(&tasklist_lock); |
| 65 | yield(); /* Yield is okay here */ | 66 | if (!todo || time_after(jiffies, end_time)) |
| 66 | if (time_after(jiffies, end_time)) | ||
| 67 | break; | 67 | break; |
| 68 | } while (todo); | 68 | |
| 69 | /* | ||
| 70 | * We need to retry, but first give the freezing tasks some | ||
| 71 | * time to enter the refrigerator. | ||
| 72 | */ | ||
| 73 | msleep(10); | ||
| 74 | } | ||
| 69 | 75 | ||
| 70 | do_gettimeofday(&end); | 76 | do_gettimeofday(&end); |
| 71 | elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start); | 77 | elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start); |
diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 890f6b11b1d3..09b2b0ae9e9d 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c | |||
| @@ -38,6 +38,107 @@ struct swsusp_header { | |||
| 38 | 38 | ||
| 39 | static struct swsusp_header *swsusp_header; | 39 | static struct swsusp_header *swsusp_header; |
| 40 | 40 | ||
| 41 | /** | ||
| 42 | * The following functions are used for tracing the allocated | ||
| 43 | * swap pages, so that they can be freed in case of an error. | ||
| 44 | */ | ||
| 45 | |||
| 46 | struct swsusp_extent { | ||
| 47 | struct rb_node node; | ||
| 48 | unsigned long start; | ||
| 49 | unsigned long end; | ||
| 50 | }; | ||
| 51 | |||
| 52 | static struct rb_root swsusp_extents = RB_ROOT; | ||
| 53 | |||
| 54 | static int swsusp_extents_insert(unsigned long swap_offset) | ||
| 55 | { | ||
| 56 | struct rb_node **new = &(swsusp_extents.rb_node); | ||
| 57 | struct rb_node *parent = NULL; | ||
| 58 | struct swsusp_extent *ext; | ||
| 59 | |||
| 60 | /* Figure out where to put the new node */ | ||
| 61 | while (*new) { | ||
| 62 | ext = container_of(*new, struct swsusp_extent, node); | ||
| 63 | parent = *new; | ||
| 64 | if (swap_offset < ext->start) { | ||
| 65 | /* Try to merge */ | ||
| 66 | if (swap_offset == ext->start - 1) { | ||
| 67 | ext->start--; | ||
| 68 | return 0; | ||
| 69 | } | ||
| 70 | new = &((*new)->rb_left); | ||
| 71 | } else if (swap_offset > ext->end) { | ||
| 72 | /* Try to merge */ | ||
| 73 | if (swap_offset == ext->end + 1) { | ||
| 74 | ext->end++; | ||
| 75 | return 0; | ||
| 76 | } | ||
| 77 | new = &((*new)->rb_right); | ||
| 78 | } else { | ||
| 79 | /* It already is in the tree */ | ||
| 80 | return -EINVAL; | ||
| 81 | } | ||
| 82 | } | ||
| 83 | /* Add the new node and rebalance the tree. */ | ||
| 84 | ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL); | ||
| 85 | if (!ext) | ||
| 86 | return -ENOMEM; | ||
| 87 | |||
| 88 | ext->start = swap_offset; | ||
| 89 | ext->end = swap_offset; | ||
| 90 | rb_link_node(&ext->node, parent, new); | ||
| 91 | rb_insert_color(&ext->node, &swsusp_extents); | ||
| 92 | return 0; | ||
| 93 | } | ||
| 94 | |||
| 95 | /** | ||
| 96 | * alloc_swapdev_block - allocate a swap page and register that it has | ||
| 97 | * been allocated, so that it can be freed in case of an error. | ||
| 98 | */ | ||
| 99 | |||
| 100 | sector_t alloc_swapdev_block(int swap) | ||
| 101 | { | ||
| 102 | unsigned long offset; | ||
| 103 | |||
| 104 | offset = swp_offset(get_swap_page_of_type(swap)); | ||
| 105 | if (offset) { | ||
| 106 | if (swsusp_extents_insert(offset)) | ||
| 107 | swap_free(swp_entry(swap, offset)); | ||
| 108 | else | ||
| 109 | return swapdev_block(swap, offset); | ||
| 110 | } | ||
| 111 | return 0; | ||
| 112 | } | ||
| 113 | |||
| 114 | /** | ||
| 115 | * free_all_swap_pages - free swap pages allocated for saving image data. | ||
| 116 | * It also frees the extents used to register which swap entries had been | ||
| 117 | * allocated. | ||
| 118 | */ | ||
| 119 | |||
| 120 | void free_all_swap_pages(int swap) | ||
| 121 | { | ||
| 122 | struct rb_node *node; | ||
| 123 | |||
| 124 | while ((node = swsusp_extents.rb_node)) { | ||
| 125 | struct swsusp_extent *ext; | ||
| 126 | unsigned long offset; | ||
| 127 | |||
| 128 | ext = container_of(node, struct swsusp_extent, node); | ||
| 129 | rb_erase(node, &swsusp_extents); | ||
| 130 | for (offset = ext->start; offset <= ext->end; offset++) | ||
| 131 | swap_free(swp_entry(swap, offset)); | ||
| 132 | |||
| 133 | kfree(ext); | ||
| 134 | } | ||
| 135 | } | ||
| 136 | |||
| 137 | int swsusp_swap_in_use(void) | ||
| 138 | { | ||
| 139 | return (swsusp_extents.rb_node != NULL); | ||
| 140 | } | ||
| 141 | |||
| 41 | /* | 142 | /* |
| 42 | * General things | 143 | * General things |
| 43 | */ | 144 | */ |
| @@ -336,7 +437,7 @@ static int save_image(struct swap_map_handle *handle, | |||
| 336 | if (ret) | 437 | if (ret) |
| 337 | break; | 438 | break; |
| 338 | if (!(nr_pages % m)) | 439 | if (!(nr_pages % m)) |
| 339 | printk("\b\b\b\b%3d%%", nr_pages / m); | 440 | printk(KERN_CONT "\b\b\b\b%3d%%", nr_pages / m); |
| 340 | nr_pages++; | 441 | nr_pages++; |
| 341 | } | 442 | } |
| 342 | err2 = wait_on_bio_chain(&bio); | 443 | err2 = wait_on_bio_chain(&bio); |
| @@ -344,9 +445,9 @@ static int save_image(struct swap_map_handle *handle, | |||
| 344 | if (!ret) | 445 | if (!ret) |
| 345 | ret = err2; | 446 | ret = err2; |
| 346 | if (!ret) | 447 | if (!ret) |
| 347 | printk("\b\b\b\bdone\n"); | 448 | printk(KERN_CONT "\b\b\b\bdone\n"); |
| 348 | else | 449 | else |
| 349 | printk("\n"); | 450 | printk(KERN_CONT "\n"); |
| 350 | swsusp_show_speed(&start, &stop, nr_to_write, "Wrote"); | 451 | swsusp_show_speed(&start, &stop, nr_to_write, "Wrote"); |
| 351 | return ret; | 452 | return ret; |
| 352 | } | 453 | } |
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c index 6a07f4dbf2f8..5b3601bd1893 100644 --- a/kernel/power/swsusp.c +++ b/kernel/power/swsusp.c | |||
| @@ -56,133 +56,3 @@ | |||
| 56 | #include "power.h" | 56 | #include "power.h" |
| 57 | 57 | ||
| 58 | int in_suspend __nosavedata = 0; | 58 | int in_suspend __nosavedata = 0; |
| 59 | |||
| 60 | /** | ||
| 61 | * The following functions are used for tracing the allocated | ||
| 62 | * swap pages, so that they can be freed in case of an error. | ||
| 63 | */ | ||
| 64 | |||
| 65 | struct swsusp_extent { | ||
| 66 | struct rb_node node; | ||
| 67 | unsigned long start; | ||
| 68 | unsigned long end; | ||
| 69 | }; | ||
| 70 | |||
| 71 | static struct rb_root swsusp_extents = RB_ROOT; | ||
| 72 | |||
| 73 | static int swsusp_extents_insert(unsigned long swap_offset) | ||
| 74 | { | ||
| 75 | struct rb_node **new = &(swsusp_extents.rb_node); | ||
| 76 | struct rb_node *parent = NULL; | ||
| 77 | struct swsusp_extent *ext; | ||
| 78 | |||
| 79 | /* Figure out where to put the new node */ | ||
| 80 | while (*new) { | ||
| 81 | ext = container_of(*new, struct swsusp_extent, node); | ||
| 82 | parent = *new; | ||
| 83 | if (swap_offset < ext->start) { | ||
| 84 | /* Try to merge */ | ||
| 85 | if (swap_offset == ext->start - 1) { | ||
| 86 | ext->start--; | ||
| 87 | return 0; | ||
| 88 | } | ||
| 89 | new = &((*new)->rb_left); | ||
| 90 | } else if (swap_offset > ext->end) { | ||
| 91 | /* Try to merge */ | ||
| 92 | if (swap_offset == ext->end + 1) { | ||
| 93 | ext->end++; | ||
| 94 | return 0; | ||
| 95 | } | ||
| 96 | new = &((*new)->rb_right); | ||
| 97 | } else { | ||
| 98 | /* It already is in the tree */ | ||
| 99 | return -EINVAL; | ||
| 100 | } | ||
| 101 | } | ||
| 102 | /* Add the new node and rebalance the tree. */ | ||
| 103 | ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL); | ||
| 104 | if (!ext) | ||
| 105 | return -ENOMEM; | ||
| 106 | |||
| 107 | ext->start = swap_offset; | ||
| 108 | ext->end = swap_offset; | ||
| 109 | rb_link_node(&ext->node, parent, new); | ||
| 110 | rb_insert_color(&ext->node, &swsusp_extents); | ||
| 111 | return 0; | ||
| 112 | } | ||
| 113 | |||
| 114 | /** | ||
| 115 | * alloc_swapdev_block - allocate a swap page and register that it has | ||
| 116 | * been allocated, so that it can be freed in case of an error. | ||
| 117 | */ | ||
| 118 | |||
| 119 | sector_t alloc_swapdev_block(int swap) | ||
| 120 | { | ||
| 121 | unsigned long offset; | ||
| 122 | |||
| 123 | offset = swp_offset(get_swap_page_of_type(swap)); | ||
| 124 | if (offset) { | ||
| 125 | if (swsusp_extents_insert(offset)) | ||
| 126 | swap_free(swp_entry(swap, offset)); | ||
| 127 | else | ||
| 128 | return swapdev_block(swap, offset); | ||
| 129 | } | ||
| 130 | return 0; | ||
| 131 | } | ||
| 132 | |||
| 133 | /** | ||
| 134 | * free_all_swap_pages - free swap pages allocated for saving image data. | ||
| 135 | * It also frees the extents used to register which swap entries had been | ||
| 136 | * allocated. | ||
| 137 | */ | ||
| 138 | |||
| 139 | void free_all_swap_pages(int swap) | ||
| 140 | { | ||
| 141 | struct rb_node *node; | ||
| 142 | |||
| 143 | while ((node = swsusp_extents.rb_node)) { | ||
| 144 | struct swsusp_extent *ext; | ||
| 145 | unsigned long offset; | ||
| 146 | |||
| 147 | ext = container_of(node, struct swsusp_extent, node); | ||
| 148 | rb_erase(node, &swsusp_extents); | ||
| 149 | for (offset = ext->start; offset <= ext->end; offset++) | ||
| 150 | swap_free(swp_entry(swap, offset)); | ||
| 151 | |||
| 152 | kfree(ext); | ||
| 153 | } | ||
| 154 | } | ||
| 155 | |||
| 156 | int swsusp_swap_in_use(void) | ||
| 157 | { | ||
| 158 | return (swsusp_extents.rb_node != NULL); | ||
| 159 | } | ||
| 160 | |||
| 161 | /** | ||
| 162 | * swsusp_show_speed - print the time elapsed between two events represented by | ||
| 163 | * @start and @stop | ||
| 164 | * | ||
| 165 | * @nr_pages - number of pages processed between @start and @stop | ||
| 166 | * @msg - introductory message to print | ||
| 167 | */ | ||
| 168 | |||
| 169 | void swsusp_show_speed(struct timeval *start, struct timeval *stop, | ||
| 170 | unsigned nr_pages, char *msg) | ||
| 171 | { | ||
| 172 | s64 elapsed_centisecs64; | ||
| 173 | int centisecs; | ||
| 174 | int k; | ||
| 175 | int kps; | ||
| 176 | |||
| 177 | elapsed_centisecs64 = timeval_to_ns(stop) - timeval_to_ns(start); | ||
| 178 | do_div(elapsed_centisecs64, NSEC_PER_SEC / 100); | ||
| 179 | centisecs = elapsed_centisecs64; | ||
| 180 | if (centisecs == 0) | ||
| 181 | centisecs = 1; /* avoid div-by-zero */ | ||
| 182 | k = nr_pages * (PAGE_SIZE / 1024); | ||
| 183 | kps = (k * 100) / centisecs; | ||
| 184 | printk(KERN_INFO "PM: %s %d kbytes in %d.%02d seconds (%d.%02d MB/s)\n", | ||
| 185 | msg, k, | ||
| 186 | centisecs / 100, centisecs % 100, | ||
| 187 | kps / 1000, (kps % 1000) / 10); | ||
| 188 | } | ||
