author    | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2017-11-12 19:41:20 -0500
committer | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2017-11-12 19:41:20 -0500
commit    | 05d658b5b57214944067fb4f62bce59200bf496f (patch)
tree      | f2aa2537874ab5c4e37e6e7073e857150e9e1f08
parent    | 794c33555f704047e4014d4601b3972cfbfe7e50 (diff)
parent    | 2dd9789c76ffde05d5f4c56f45c3cb71b3936694 (diff)
Merge branch 'pm-sleep'
* pm-sleep:
freezer: Fix typo in freezable_schedule_timeout() comment
PM / s2idle: Clear the events_check_enabled flag
PM / sleep: Remove pm_complete_with_resume_check()
PM: ARM: locomo: Drop suspend and resume bus type callbacks
PM: Use a more common logging style
PM: Document rules on using pm_runtime_resume() in system suspend callbacks
-rw-r--r-- | Documentation/driver-api/pm/devices.rst |  25
-rw-r--r-- | arch/arm/common/locomo.c                |  24
-rw-r--r-- | arch/arm/include/asm/hardware/locomo.h  |   2
-rw-r--r-- | drivers/base/power/generic_ops.c        |  23
-rw-r--r-- | include/linux/freezer.h                 |   2
-rw-r--r-- | include/linux/pm.h                      |   1
-rw-r--r-- | kernel/power/qos.c                      |   4
-rw-r--r-- | kernel/power/snapshot.c                 |  35
-rw-r--r-- | kernel/power/suspend.c                  |   2
-rw-r--r-- | kernel/power/swap.c                     | 128
10 files changed, 103 insertions, 143 deletions
diff --git a/Documentation/driver-api/pm/devices.rst b/Documentation/driver-api/pm/devices.rst
index b8f1e3bdb743..b5d7d4948e93 100644
--- a/Documentation/driver-api/pm/devices.rst
+++ b/Documentation/driver-api/pm/devices.rst
@@ -328,7 +328,10 @@ the phases are: ``prepare``, ``suspend``, ``suspend_late``, ``suspend_noirq``. | |||
328 | After the ``->prepare`` callback method returns, no new children may be | 328 | After the ``->prepare`` callback method returns, no new children may be |
329 | registered below the device. The method may also prepare the device or | 329 | registered below the device. The method may also prepare the device or |
330 | driver in some way for the upcoming system power transition, but it | 330 | driver in some way for the upcoming system power transition, but it |
331 | should not put the device into a low-power state. | 331 | should not put the device into a low-power state. Moreover, if the |
332 | device supports runtime power management, the ``->prepare`` callback | ||
333 | method must not update its state in case it is necessary to resume it | ||
334 | from runtime suspend later on. | ||
332 | 335 | ||
333 | For devices supporting runtime power management, the return value of the | 336 | For devices supporting runtime power management, the return value of the |
334 | prepare callback can be used to indicate to the PM core that it may | 337 | prepare callback can be used to indicate to the PM core that it may |
@@ -356,6 +359,16 @@ the phases are: ``prepare``, ``suspend``, ``suspend_late``, ``suspend_noirq``. | |||
356 | the appropriate low-power state, depending on the bus type the device is | 359 | the appropriate low-power state, depending on the bus type the device is |
357 | on, and they may enable wakeup events. | 360 | on, and they may enable wakeup events. |
358 | 361 | ||
362 | However, for devices supporting runtime power management, the | ||
363 | ``->suspend`` methods provided by subsystems (bus types and PM domains | ||
364 | in particular) must follow an additional rule regarding what can be done | ||
365 | to the devices before their drivers' ``->suspend`` methods are called. | ||
366 | Namely, they can only resume the devices from runtime suspend by | ||
367 | calling :c:func:`pm_runtime_resume` for them, if that is necessary, and | ||
368 | they must not update the state of the devices in any other way at that | ||
369 | time (in case the drivers need to resume the devices from runtime | ||
370 | suspend in their ``->suspend`` methods). | ||
371 | |||
359 | 3. For a number of devices it is convenient to split suspend into the | 372 | 3. For a number of devices it is convenient to split suspend into the |
360 | "quiesce device" and "save device state" phases, in which cases | 373 | "quiesce device" and "save device state" phases, in which cases |
361 | ``suspend_late`` is meant to do the latter. It is always executed after | 374 | ``suspend_late`` is meant to do the latter. It is always executed after |
@@ -729,6 +742,16 @@ state temporarily, for example so that its system wakeup capability can be | |||
729 | disabled. This all depends on the hardware and the design of the subsystem and | 742 | disabled. This all depends on the hardware and the design of the subsystem and |
730 | device driver in question. | 743 | device driver in question. |
731 | 744 | ||
745 | If it is necessary to resume a device from runtime suspend during a system-wide | ||
746 | transition into a sleep state, that can be done by calling | ||
747 | :c:func:`pm_runtime_resume` for it from the ``->suspend`` callback (or its | ||
748 | counterpart for transitions related to hibernation) of either the device's driver | ||
749 | or a subsystem responsible for it (for example, a bus type or a PM domain). | ||
750 | That is guaranteed to work by the requirement that subsystems must not change | ||
751 | the state of devices (possibly except for resuming them from runtime suspend) | ||
752 | from their ``->prepare`` and ``->suspend`` callbacks (or equivalent) *before* | ||
753 | invoking device drivers' ``->suspend`` callbacks (or equivalent). | ||
754 | |||
732 | During system-wide resume from a sleep state it's easiest to put devices into | 755 | During system-wide resume from a sleep state it's easiest to put devices into |
733 | the full-power state, as explained in :file:`Documentation/power/runtime_pm.txt`. | 756 | the full-power state, as explained in :file:`Documentation/power/runtime_pm.txt`. |
734 | Refer to that document for more information regarding this particular issue as | 757 | Refer to that document for more information regarding this particular issue as |
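Not part of the patch, but to make the documented rule concrete: below is a sketch of a driver ``->suspend`` callback that relies on it. The ``foo`` platform driver, its data layout and the register it saves are all hypothetical; only the pm_runtime_resume() call pattern reflects the rule added above.

```c
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/* Hypothetical driver data, only to give the callback something to save. */
struct foo_data {
	void __iomem *regs;
	u32 saved_ctrl;
};

static int foo_suspend(struct device *dev)
{
	struct foo_data *fd = dev_get_drvdata(dev);

	/*
	 * The device may still be runtime-suspended at this point.  Per the
	 * rule documented above, the subsystem has not changed its state
	 * (except possibly to runtime-resume it), so the driver may safely
	 * call pm_runtime_resume() itself and then save state from
	 * powered-up hardware.
	 */
	pm_runtime_resume(dev);

	fd->saved_ctrl = readl(fd->regs);	/* "save device state" */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, NULL)
};

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",		/* hypothetical */
		.pm	= &foo_pm_ops,
	},
	/* .probe / .remove omitted for brevity */
};
```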
diff --git a/arch/arm/common/locomo.c b/arch/arm/common/locomo.c
index 6c7b06854fce..51936bde1eb2 100644
--- a/arch/arm/common/locomo.c
+++ b/arch/arm/common/locomo.c
@@ -826,28 +826,6 @@ static int locomo_match(struct device *_dev, struct device_driver *_drv) | |||
826 | return dev->devid == drv->devid; | 826 | return dev->devid == drv->devid; |
827 | } | 827 | } |
828 | 828 | ||
829 | static int locomo_bus_suspend(struct device *dev, pm_message_t state) | ||
830 | { | ||
831 | struct locomo_dev *ldev = LOCOMO_DEV(dev); | ||
832 | struct locomo_driver *drv = LOCOMO_DRV(dev->driver); | ||
833 | int ret = 0; | ||
834 | |||
835 | if (drv && drv->suspend) | ||
836 | ret = drv->suspend(ldev, state); | ||
837 | return ret; | ||
838 | } | ||
839 | |||
840 | static int locomo_bus_resume(struct device *dev) | ||
841 | { | ||
842 | struct locomo_dev *ldev = LOCOMO_DEV(dev); | ||
843 | struct locomo_driver *drv = LOCOMO_DRV(dev->driver); | ||
844 | int ret = 0; | ||
845 | |||
846 | if (drv && drv->resume) | ||
847 | ret = drv->resume(ldev); | ||
848 | return ret; | ||
849 | } | ||
850 | |||
851 | static int locomo_bus_probe(struct device *dev) | 829 | static int locomo_bus_probe(struct device *dev) |
852 | { | 830 | { |
853 | struct locomo_dev *ldev = LOCOMO_DEV(dev); | 831 | struct locomo_dev *ldev = LOCOMO_DEV(dev); |
@@ -875,8 +853,6 @@ struct bus_type locomo_bus_type = { | |||
875 | .match = locomo_match, | 853 | .match = locomo_match, |
876 | .probe = locomo_bus_probe, | 854 | .probe = locomo_bus_probe, |
877 | .remove = locomo_bus_remove, | 855 | .remove = locomo_bus_remove, |
878 | .suspend = locomo_bus_suspend, | ||
879 | .resume = locomo_bus_resume, | ||
880 | }; | 856 | }; |
881 | 857 | ||
882 | int locomo_driver_register(struct locomo_driver *driver) | 858 | int locomo_driver_register(struct locomo_driver *driver) |
diff --git a/arch/arm/include/asm/hardware/locomo.h b/arch/arm/include/asm/hardware/locomo.h
index 74e51d6bd93f..f8712e3c29cf 100644
--- a/arch/arm/include/asm/hardware/locomo.h
+++ b/arch/arm/include/asm/hardware/locomo.h
@@ -189,8 +189,6 @@ struct locomo_driver { | |||
189 | unsigned int devid; | 189 | unsigned int devid; |
190 | int (*probe)(struct locomo_dev *); | 190 | int (*probe)(struct locomo_dev *); |
191 | int (*remove)(struct locomo_dev *); | 191 | int (*remove)(struct locomo_dev *); |
192 | int (*suspend)(struct locomo_dev *, pm_message_t); | ||
193 | int (*resume)(struct locomo_dev *); | ||
194 | }; | 192 | }; |
195 | 193 | ||
196 | #define LOCOMO_DRV(_d) container_of((_d), struct locomo_driver, drv) | 194 | #define LOCOMO_DRV(_d) container_of((_d), struct locomo_driver, drv) |
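With the LoCoMo bus-level suspend/resume callbacks gone (they had no users), a child driver that did need power management would express it through dev_pm_ops on its device_driver instead. A minimal, hypothetical sketch of that replacement, not taken from the tree:

```c
#include <linux/pm.h>
#include <asm/hardware/locomo.h>

static int locomo_led_suspend(struct device *dev)
{
	/* Quiesce the (hypothetical) LED unit behind LOCOMO_DEV(dev) here. */
	return 0;
}

static int locomo_led_resume(struct device *dev)
{
	/* Restore the (hypothetical) LED unit here. */
	return 0;
}

static SIMPLE_DEV_PM_OPS(locomo_led_pm_ops, locomo_led_suspend,
			 locomo_led_resume);

/* Registered with locomo_driver_register() from the driver's init code. */
static struct locomo_driver locomo_led_driver = {
	.drv = {
		.name	= "locomo-led",
		.pm	= &locomo_led_pm_ops,
	},
	.devid	= LOCOMO_DEVID_LED,
	/* .probe / .remove omitted for brevity */
};
```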
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 07c3c4a9522d..b2ed606265a8 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -9,7 +9,6 @@ | |||
9 | #include <linux/pm.h> | 9 | #include <linux/pm.h> |
10 | #include <linux/pm_runtime.h> | 10 | #include <linux/pm_runtime.h> |
11 | #include <linux/export.h> | 11 | #include <linux/export.h> |
12 | #include <linux/suspend.h> | ||
13 | 12 | ||
14 | #ifdef CONFIG_PM | 13 | #ifdef CONFIG_PM |
15 | /** | 14 | /** |
@@ -298,26 +297,4 @@ void pm_generic_complete(struct device *dev) | |||
298 | if (drv && drv->pm && drv->pm->complete) | 297 | if (drv && drv->pm && drv->pm->complete) |
299 | drv->pm->complete(dev); | 298 | drv->pm->complete(dev); |
300 | } | 299 | } |
301 | |||
302 | /** | ||
303 | * pm_complete_with_resume_check - Complete a device power transition. | ||
304 | * @dev: Device to handle. | ||
305 | * | ||
306 | * Complete a device power transition during a system-wide power transition and | ||
307 | * optionally schedule a runtime resume of the device if the system resume in | ||
308 | * progress has been initated by the platform firmware and the device had its | ||
309 | * power.direct_complete flag set. | ||
310 | */ | ||
311 | void pm_complete_with_resume_check(struct device *dev) | ||
312 | { | ||
313 | pm_generic_complete(dev); | ||
314 | /* | ||
315 | * If the device had been runtime-suspended before the system went into | ||
316 | * the sleep state it is going out of and it has never been resumed till | ||
317 | * now, resume it in case the firmware powered it up. | ||
318 | */ | ||
319 | if (dev->power.direct_complete && pm_resume_via_firmware()) | ||
320 | pm_request_resume(dev); | ||
321 | } | ||
322 | EXPORT_SYMBOL_GPL(pm_complete_with_resume_check); | ||
323 | #endif /* CONFIG_PM_SLEEP */ | 300 | #endif /* CONFIG_PM_SLEEP */ |
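pm_complete_with_resume_check() goes away because nothing calls it anymore. For reference, a driver that still wanted the same behavior could open-code the check shown in the removed body inside its own ``->complete`` callback, roughly like this (``foo_complete`` is a made-up name):

```c
#include <linux/pm_runtime.h>
#include <linux/suspend.h>

static void foo_complete(struct device *dev)
{
	pm_generic_complete(dev);

	/*
	 * Same logic as the removed helper: if the device stayed
	 * runtime-suspended across the transition (direct_complete) and the
	 * system resume was started by the platform firmware, the firmware
	 * may have powered the device up behind our back, so schedule a
	 * runtime resume to re-synchronize its state.
	 */
	if (dev->power.direct_complete && pm_resume_via_firmware())
		pm_request_resume(dev);
}
```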
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index 3995df1d068f..21f5aa0b217f 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -182,7 +182,7 @@ static inline void freezable_schedule_unsafe(void) | |||
182 | } | 182 | } |
183 | 183 | ||
184 | /* | 184 | /* |
185 | * Like freezable_schedule_timeout(), but should not block the freezer. Do not | 185 | * Like schedule_timeout(), but should not block the freezer. Do not |
186 | * call this with locks held. | 186 | * call this with locks held. |
187 | */ | 187 | */ |
188 | static inline long freezable_schedule_timeout(long timeout) | 188 | static inline long freezable_schedule_timeout(long timeout) |
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 47ded8aa8a5d..a0ceeccf2846 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -736,7 +736,6 @@ extern int pm_generic_poweroff_noirq(struct device *dev); | |||
736 | extern int pm_generic_poweroff_late(struct device *dev); | 736 | extern int pm_generic_poweroff_late(struct device *dev); |
737 | extern int pm_generic_poweroff(struct device *dev); | 737 | extern int pm_generic_poweroff(struct device *dev); |
738 | extern void pm_generic_complete(struct device *dev); | 738 | extern void pm_generic_complete(struct device *dev); |
739 | extern void pm_complete_with_resume_check(struct device *dev); | ||
740 | 739 | ||
741 | #else /* !CONFIG_PM_SLEEP */ | 740 | #else /* !CONFIG_PM_SLEEP */ |
742 | 741 | ||
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 97b0df71303e..9d7503910ce2 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -701,8 +701,8 @@ static int __init pm_qos_power_init(void) | |||
701 | for (i = PM_QOS_CPU_DMA_LATENCY; i < PM_QOS_NUM_CLASSES; i++) { | 701 | for (i = PM_QOS_CPU_DMA_LATENCY; i < PM_QOS_NUM_CLASSES; i++) { |
702 | ret = register_pm_qos_misc(pm_qos_array[i], d); | 702 | ret = register_pm_qos_misc(pm_qos_array[i], d); |
703 | if (ret < 0) { | 703 | if (ret < 0) { |
704 | printk(KERN_ERR "pm_qos_param: %s setup failed\n", | 704 | pr_err("%s: %s setup failed\n", |
705 | pm_qos_array[i]->name); | 705 | __func__, pm_qos_array[i]->name); |
706 | return ret; | 706 | return ret; |
707 | } | 707 | } |
708 | } | 708 | } |
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 0972a8e09d08..a917a301e201 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -10,6 +10,8 @@ | |||
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #define pr_fmt(fmt) "PM: " fmt | ||
14 | |||
13 | #include <linux/version.h> | 15 | #include <linux/version.h> |
14 | #include <linux/module.h> | 16 | #include <linux/module.h> |
15 | #include <linux/mm.h> | 17 | #include <linux/mm.h> |
@@ -967,7 +969,7 @@ void __init __register_nosave_region(unsigned long start_pfn, | |||
967 | region->end_pfn = end_pfn; | 969 | region->end_pfn = end_pfn; |
968 | list_add_tail(®ion->list, &nosave_regions); | 970 | list_add_tail(®ion->list, &nosave_regions); |
969 | Report: | 971 | Report: |
970 | printk(KERN_INFO "PM: Registered nosave memory: [mem %#010llx-%#010llx]\n", | 972 | pr_info("Registered nosave memory: [mem %#010llx-%#010llx]\n", |
971 | (unsigned long long) start_pfn << PAGE_SHIFT, | 973 | (unsigned long long) start_pfn << PAGE_SHIFT, |
972 | ((unsigned long long) end_pfn << PAGE_SHIFT) - 1); | 974 | ((unsigned long long) end_pfn << PAGE_SHIFT) - 1); |
973 | } | 975 | } |
@@ -1039,7 +1041,7 @@ static void mark_nosave_pages(struct memory_bitmap *bm) | |||
1039 | list_for_each_entry(region, &nosave_regions, list) { | 1041 | list_for_each_entry(region, &nosave_regions, list) { |
1040 | unsigned long pfn; | 1042 | unsigned long pfn; |
1041 | 1043 | ||
1042 | pr_debug("PM: Marking nosave pages: [mem %#010llx-%#010llx]\n", | 1044 | pr_debug("Marking nosave pages: [mem %#010llx-%#010llx]\n", |
1043 | (unsigned long long) region->start_pfn << PAGE_SHIFT, | 1045 | (unsigned long long) region->start_pfn << PAGE_SHIFT, |
1044 | ((unsigned long long) region->end_pfn << PAGE_SHIFT) | 1046 | ((unsigned long long) region->end_pfn << PAGE_SHIFT) |
1045 | - 1); | 1047 | - 1); |
@@ -1095,7 +1097,7 @@ int create_basic_memory_bitmaps(void) | |||
1095 | free_pages_map = bm2; | 1097 | free_pages_map = bm2; |
1096 | mark_nosave_pages(forbidden_pages_map); | 1098 | mark_nosave_pages(forbidden_pages_map); |
1097 | 1099 | ||
1098 | pr_debug("PM: Basic memory bitmaps created\n"); | 1100 | pr_debug("Basic memory bitmaps created\n"); |
1099 | 1101 | ||
1100 | return 0; | 1102 | return 0; |
1101 | 1103 | ||
@@ -1131,7 +1133,7 @@ void free_basic_memory_bitmaps(void) | |||
1131 | memory_bm_free(bm2, PG_UNSAFE_CLEAR); | 1133 | memory_bm_free(bm2, PG_UNSAFE_CLEAR); |
1132 | kfree(bm2); | 1134 | kfree(bm2); |
1133 | 1135 | ||
1134 | pr_debug("PM: Basic memory bitmaps freed\n"); | 1136 | pr_debug("Basic memory bitmaps freed\n"); |
1135 | } | 1137 | } |
1136 | 1138 | ||
1137 | void clear_free_pages(void) | 1139 | void clear_free_pages(void) |
@@ -1152,7 +1154,7 @@ void clear_free_pages(void) | |||
1152 | pfn = memory_bm_next_pfn(bm); | 1154 | pfn = memory_bm_next_pfn(bm); |
1153 | } | 1155 | } |
1154 | memory_bm_position_reset(bm); | 1156 | memory_bm_position_reset(bm); |
1155 | pr_info("PM: free pages cleared after restore\n"); | 1157 | pr_info("free pages cleared after restore\n"); |
1156 | #endif /* PAGE_POISONING_ZERO */ | 1158 | #endif /* PAGE_POISONING_ZERO */ |
1157 | } | 1159 | } |
1158 | 1160 | ||
@@ -1690,7 +1692,7 @@ int hibernate_preallocate_memory(void) | |||
1690 | ktime_t start, stop; | 1692 | ktime_t start, stop; |
1691 | int error; | 1693 | int error; |
1692 | 1694 | ||
1693 | printk(KERN_INFO "PM: Preallocating image memory... "); | 1695 | pr_info("Preallocating image memory... "); |
1694 | start = ktime_get(); | 1696 | start = ktime_get(); |
1695 | 1697 | ||
1696 | error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY); | 1698 | error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY); |
@@ -1821,13 +1823,13 @@ int hibernate_preallocate_memory(void) | |||
1821 | 1823 | ||
1822 | out: | 1824 | out: |
1823 | stop = ktime_get(); | 1825 | stop = ktime_get(); |
1824 | printk(KERN_CONT "done (allocated %lu pages)\n", pages); | 1826 | pr_cont("done (allocated %lu pages)\n", pages); |
1825 | swsusp_show_speed(start, stop, pages, "Allocated"); | 1827 | swsusp_show_speed(start, stop, pages, "Allocated"); |
1826 | 1828 | ||
1827 | return 0; | 1829 | return 0; |
1828 | 1830 | ||
1829 | err_out: | 1831 | err_out: |
1830 | printk(KERN_CONT "\n"); | 1832 | pr_cont("\n"); |
1831 | swsusp_free(); | 1833 | swsusp_free(); |
1832 | return -ENOMEM; | 1834 | return -ENOMEM; |
1833 | } | 1835 | } |
@@ -1867,8 +1869,8 @@ static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem) | |||
1867 | free += zone_page_state(zone, NR_FREE_PAGES); | 1869 | free += zone_page_state(zone, NR_FREE_PAGES); |
1868 | 1870 | ||
1869 | nr_pages += count_pages_for_highmem(nr_highmem); | 1871 | nr_pages += count_pages_for_highmem(nr_highmem); |
1870 | pr_debug("PM: Normal pages needed: %u + %u, available pages: %u\n", | 1872 | pr_debug("Normal pages needed: %u + %u, available pages: %u\n", |
1871 | nr_pages, PAGES_FOR_IO, free); | 1873 | nr_pages, PAGES_FOR_IO, free); |
1872 | 1874 | ||
1873 | return free > nr_pages + PAGES_FOR_IO; | 1875 | return free > nr_pages + PAGES_FOR_IO; |
1874 | } | 1876 | } |
@@ -1961,20 +1963,20 @@ asmlinkage __visible int swsusp_save(void) | |||
1961 | { | 1963 | { |
1962 | unsigned int nr_pages, nr_highmem; | 1964 | unsigned int nr_pages, nr_highmem; |
1963 | 1965 | ||
1964 | printk(KERN_INFO "PM: Creating hibernation image:\n"); | 1966 | pr_info("Creating hibernation image:\n"); |
1965 | 1967 | ||
1966 | drain_local_pages(NULL); | 1968 | drain_local_pages(NULL); |
1967 | nr_pages = count_data_pages(); | 1969 | nr_pages = count_data_pages(); |
1968 | nr_highmem = count_highmem_pages(); | 1970 | nr_highmem = count_highmem_pages(); |
1969 | printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem); | 1971 | pr_info("Need to copy %u pages\n", nr_pages + nr_highmem); |
1970 | 1972 | ||
1971 | if (!enough_free_mem(nr_pages, nr_highmem)) { | 1973 | if (!enough_free_mem(nr_pages, nr_highmem)) { |
1972 | printk(KERN_ERR "PM: Not enough free memory\n"); | 1974 | pr_err("Not enough free memory\n"); |
1973 | return -ENOMEM; | 1975 | return -ENOMEM; |
1974 | } | 1976 | } |
1975 | 1977 | ||
1976 | if (swsusp_alloc(©_bm, nr_pages, nr_highmem)) { | 1978 | if (swsusp_alloc(©_bm, nr_pages, nr_highmem)) { |
1977 | printk(KERN_ERR "PM: Memory allocation failed\n"); | 1979 | pr_err("Memory allocation failed\n"); |
1978 | return -ENOMEM; | 1980 | return -ENOMEM; |
1979 | } | 1981 | } |
1980 | 1982 | ||
@@ -1995,8 +1997,7 @@ asmlinkage __visible int swsusp_save(void) | |||
1995 | nr_copy_pages = nr_pages; | 1997 | nr_copy_pages = nr_pages; |
1996 | nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE); | 1998 | nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE); |
1997 | 1999 | ||
1998 | printk(KERN_INFO "PM: Hibernation image created (%d pages copied)\n", | 2000 | pr_info("Hibernation image created (%d pages copied)\n", nr_pages); |
1999 | nr_pages); | ||
2000 | 2001 | ||
2001 | return 0; | 2002 | return 0; |
2002 | } | 2003 | } |
@@ -2170,7 +2171,7 @@ static int check_header(struct swsusp_info *info) | |||
2170 | if (!reason && info->num_physpages != get_num_physpages()) | 2171 | if (!reason && info->num_physpages != get_num_physpages()) |
2171 | reason = "memory size"; | 2172 | reason = "memory size"; |
2172 | if (reason) { | 2173 | if (reason) { |
2173 | printk(KERN_ERR "PM: Image mismatch: %s\n", reason); | 2174 | pr_err("Image mismatch: %s\n", reason); |
2174 | return -EPERM; | 2175 | return -EPERM; |
2175 | } | 2176 | } |
2176 | return 0; | 2177 | return 0; |
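The mechanism behind this logging conversion: ``pr_fmt()`` is expanded by the ``pr_*()`` wrappers at each call site, so defining it once at the top of the file (before ``<linux/printk.h>`` is pulled in, even indirectly) prefixes every message with "PM: " without repeating the string. A standalone sketch of the pattern, with a made-up message:

```c
/* Must be defined before <linux/printk.h> is included (even indirectly). */
#define pr_fmt(fmt) "PM: " fmt

#include <linux/printk.h>

void foo_report_range(unsigned long long start, unsigned long long end)
{
	/* Expands to printk(KERN_INFO "PM: example range: ..."). */
	pr_info("example range: [mem %#010llx-%#010llx]\n", start, end);
}
```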
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index ccd2d20e6b06..0685c4499431 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -437,7 +437,6 @@ static int suspend_enter(suspend_state_t state, bool *wakeup) | |||
437 | error = suspend_ops->enter(state); | 437 | error = suspend_ops->enter(state); |
438 | trace_suspend_resume(TPS("machine_suspend"), | 438 | trace_suspend_resume(TPS("machine_suspend"), |
439 | state, false); | 439 | state, false); |
440 | events_check_enabled = false; | ||
441 | } else if (*wakeup) { | 440 | } else if (*wakeup) { |
442 | error = -EBUSY; | 441 | error = -EBUSY; |
443 | } | 442 | } |
@@ -582,6 +581,7 @@ static int enter_state(suspend_state_t state) | |||
582 | pm_restore_gfp_mask(); | 581 | pm_restore_gfp_mask(); |
583 | 582 | ||
584 | Finish: | 583 | Finish: |
584 | events_check_enabled = false; | ||
585 | pm_pr_dbg("Finishing wakeup.\n"); | 585 | pm_pr_dbg("Finishing wakeup.\n"); |
586 | suspend_finish(); | 586 | suspend_finish(); |
587 | Unlock: | 587 | Unlock: |
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index d7cdc426ee38..293ead59eccc 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -12,6 +12,8 @@ | |||
12 | * | 12 | * |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #define pr_fmt(fmt) "PM: " fmt | ||
16 | |||
15 | #include <linux/module.h> | 17 | #include <linux/module.h> |
16 | #include <linux/file.h> | 18 | #include <linux/file.h> |
17 | #include <linux/delay.h> | 19 | #include <linux/delay.h> |
@@ -241,9 +243,9 @@ static void hib_end_io(struct bio *bio) | |||
241 | struct page *page = bio->bi_io_vec[0].bv_page; | 243 | struct page *page = bio->bi_io_vec[0].bv_page; |
242 | 244 | ||
243 | if (bio->bi_status) { | 245 | if (bio->bi_status) { |
244 | printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n", | 246 | pr_alert("Read-error on swap-device (%u:%u:%Lu)\n", |
245 | MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)), | 247 | MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)), |
246 | (unsigned long long)bio->bi_iter.bi_sector); | 248 | (unsigned long long)bio->bi_iter.bi_sector); |
247 | } | 249 | } |
248 | 250 | ||
249 | if (bio_data_dir(bio) == WRITE) | 251 | if (bio_data_dir(bio) == WRITE) |
@@ -273,8 +275,8 @@ static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr, | |||
273 | bio_set_op_attrs(bio, op, op_flags); | 275 | bio_set_op_attrs(bio, op, op_flags); |
274 | 276 | ||
275 | if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { | 277 | if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { |
276 | printk(KERN_ERR "PM: Adding page to bio failed at %llu\n", | 278 | pr_err("Adding page to bio failed at %llu\n", |
277 | (unsigned long long)bio->bi_iter.bi_sector); | 279 | (unsigned long long)bio->bi_iter.bi_sector); |
278 | bio_put(bio); | 280 | bio_put(bio); |
279 | return -EFAULT; | 281 | return -EFAULT; |
280 | } | 282 | } |
@@ -319,7 +321,7 @@ static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags) | |||
319 | error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC, | 321 | error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC, |
320 | swsusp_resume_block, swsusp_header, NULL); | 322 | swsusp_resume_block, swsusp_header, NULL); |
321 | } else { | 323 | } else { |
322 | printk(KERN_ERR "PM: Swap header not found!\n"); | 324 | pr_err("Swap header not found!\n"); |
323 | error = -ENODEV; | 325 | error = -ENODEV; |
324 | } | 326 | } |
325 | return error; | 327 | return error; |
@@ -413,8 +415,7 @@ static int get_swap_writer(struct swap_map_handle *handle) | |||
413 | ret = swsusp_swap_check(); | 415 | ret = swsusp_swap_check(); |
414 | if (ret) { | 416 | if (ret) { |
415 | if (ret != -ENOSPC) | 417 | if (ret != -ENOSPC) |
416 | printk(KERN_ERR "PM: Cannot find swap device, try " | 418 | pr_err("Cannot find swap device, try swapon -a\n"); |
417 | "swapon -a.\n"); | ||
418 | return ret; | 419 | return ret; |
419 | } | 420 | } |
420 | handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL); | 421 | handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL); |
@@ -491,9 +492,9 @@ static int swap_writer_finish(struct swap_map_handle *handle, | |||
491 | { | 492 | { |
492 | if (!error) { | 493 | if (!error) { |
493 | flush_swap_writer(handle); | 494 | flush_swap_writer(handle); |
494 | printk(KERN_INFO "PM: S"); | 495 | pr_info("S"); |
495 | error = mark_swapfiles(handle, flags); | 496 | error = mark_swapfiles(handle, flags); |
496 | printk("|\n"); | 497 | pr_cont("|\n"); |
497 | } | 498 | } |
498 | 499 | ||
499 | if (error) | 500 | if (error) |
@@ -542,7 +543,7 @@ static int save_image(struct swap_map_handle *handle, | |||
542 | 543 | ||
543 | hib_init_batch(&hb); | 544 | hib_init_batch(&hb); |
544 | 545 | ||
545 | printk(KERN_INFO "PM: Saving image data pages (%u pages)...\n", | 546 | pr_info("Saving image data pages (%u pages)...\n", |
546 | nr_to_write); | 547 | nr_to_write); |
547 | m = nr_to_write / 10; | 548 | m = nr_to_write / 10; |
548 | if (!m) | 549 | if (!m) |
@@ -557,8 +558,8 @@ static int save_image(struct swap_map_handle *handle, | |||
557 | if (ret) | 558 | if (ret) |
558 | break; | 559 | break; |
559 | if (!(nr_pages % m)) | 560 | if (!(nr_pages % m)) |
560 | printk(KERN_INFO "PM: Image saving progress: %3d%%\n", | 561 | pr_info("Image saving progress: %3d%%\n", |
561 | nr_pages / m * 10); | 562 | nr_pages / m * 10); |
562 | nr_pages++; | 563 | nr_pages++; |
563 | } | 564 | } |
564 | err2 = hib_wait_io(&hb); | 565 | err2 = hib_wait_io(&hb); |
@@ -566,7 +567,7 @@ static int save_image(struct swap_map_handle *handle, | |||
566 | if (!ret) | 567 | if (!ret) |
567 | ret = err2; | 568 | ret = err2; |
568 | if (!ret) | 569 | if (!ret) |
569 | printk(KERN_INFO "PM: Image saving done.\n"); | 570 | pr_info("Image saving done\n"); |
570 | swsusp_show_speed(start, stop, nr_to_write, "Wrote"); | 571 | swsusp_show_speed(start, stop, nr_to_write, "Wrote"); |
571 | return ret; | 572 | return ret; |
572 | } | 573 | } |
@@ -692,14 +693,14 @@ static int save_image_lzo(struct swap_map_handle *handle, | |||
692 | 693 | ||
693 | page = (void *)__get_free_page(__GFP_RECLAIM | __GFP_HIGH); | 694 | page = (void *)__get_free_page(__GFP_RECLAIM | __GFP_HIGH); |
694 | if (!page) { | 695 | if (!page) { |
695 | printk(KERN_ERR "PM: Failed to allocate LZO page\n"); | 696 | pr_err("Failed to allocate LZO page\n"); |
696 | ret = -ENOMEM; | 697 | ret = -ENOMEM; |
697 | goto out_clean; | 698 | goto out_clean; |
698 | } | 699 | } |
699 | 700 | ||
700 | data = vmalloc(sizeof(*data) * nr_threads); | 701 | data = vmalloc(sizeof(*data) * nr_threads); |
701 | if (!data) { | 702 | if (!data) { |
702 | printk(KERN_ERR "PM: Failed to allocate LZO data\n"); | 703 | pr_err("Failed to allocate LZO data\n"); |
703 | ret = -ENOMEM; | 704 | ret = -ENOMEM; |
704 | goto out_clean; | 705 | goto out_clean; |
705 | } | 706 | } |
@@ -708,7 +709,7 @@ static int save_image_lzo(struct swap_map_handle *handle, | |||
708 | 709 | ||
709 | crc = kmalloc(sizeof(*crc), GFP_KERNEL); | 710 | crc = kmalloc(sizeof(*crc), GFP_KERNEL); |
710 | if (!crc) { | 711 | if (!crc) { |
711 | printk(KERN_ERR "PM: Failed to allocate crc\n"); | 712 | pr_err("Failed to allocate crc\n"); |
712 | ret = -ENOMEM; | 713 | ret = -ENOMEM; |
713 | goto out_clean; | 714 | goto out_clean; |
714 | } | 715 | } |
@@ -726,8 +727,7 @@ static int save_image_lzo(struct swap_map_handle *handle, | |||
726 | "image_compress/%u", thr); | 727 | "image_compress/%u", thr); |
727 | if (IS_ERR(data[thr].thr)) { | 728 | if (IS_ERR(data[thr].thr)) { |
728 | data[thr].thr = NULL; | 729 | data[thr].thr = NULL; |
729 | printk(KERN_ERR | 730 | pr_err("Cannot start compression threads\n"); |
730 | "PM: Cannot start compression threads\n"); | ||
731 | ret = -ENOMEM; | 731 | ret = -ENOMEM; |
732 | goto out_clean; | 732 | goto out_clean; |
733 | } | 733 | } |
@@ -749,7 +749,7 @@ static int save_image_lzo(struct swap_map_handle *handle, | |||
749 | crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32"); | 749 | crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32"); |
750 | if (IS_ERR(crc->thr)) { | 750 | if (IS_ERR(crc->thr)) { |
751 | crc->thr = NULL; | 751 | crc->thr = NULL; |
752 | printk(KERN_ERR "PM: Cannot start CRC32 thread\n"); | 752 | pr_err("Cannot start CRC32 thread\n"); |
753 | ret = -ENOMEM; | 753 | ret = -ENOMEM; |
754 | goto out_clean; | 754 | goto out_clean; |
755 | } | 755 | } |
@@ -760,10 +760,9 @@ static int save_image_lzo(struct swap_map_handle *handle, | |||
760 | */ | 760 | */ |
761 | handle->reqd_free_pages = reqd_free_pages(); | 761 | handle->reqd_free_pages = reqd_free_pages(); |
762 | 762 | ||
763 | printk(KERN_INFO | 763 | pr_info("Using %u thread(s) for compression\n", nr_threads); |
764 | "PM: Using %u thread(s) for compression.\n" | 764 | pr_info("Compressing and saving image data (%u pages)...\n", |
765 | "PM: Compressing and saving image data (%u pages)...\n", | 765 | nr_to_write); |
766 | nr_threads, nr_to_write); | ||
767 | m = nr_to_write / 10; | 766 | m = nr_to_write / 10; |
768 | if (!m) | 767 | if (!m) |
769 | m = 1; | 768 | m = 1; |
@@ -783,10 +782,8 @@ static int save_image_lzo(struct swap_map_handle *handle, | |||
783 | data_of(*snapshot), PAGE_SIZE); | 782 | data_of(*snapshot), PAGE_SIZE); |
784 | 783 | ||
785 | if (!(nr_pages % m)) | 784 | if (!(nr_pages % m)) |
786 | printk(KERN_INFO | 785 | pr_info("Image saving progress: %3d%%\n", |
787 | "PM: Image saving progress: " | 786 | nr_pages / m * 10); |
788 | "%3d%%\n", | ||
789 | nr_pages / m * 10); | ||
790 | nr_pages++; | 787 | nr_pages++; |
791 | } | 788 | } |
792 | if (!off) | 789 | if (!off) |
@@ -813,15 +810,14 @@ static int save_image_lzo(struct swap_map_handle *handle, | |||
813 | ret = data[thr].ret; | 810 | ret = data[thr].ret; |
814 | 811 | ||
815 | if (ret < 0) { | 812 | if (ret < 0) { |
816 | printk(KERN_ERR "PM: LZO compression failed\n"); | 813 | pr_err("LZO compression failed\n"); |
817 | goto out_finish; | 814 | goto out_finish; |
818 | } | 815 | } |
819 | 816 | ||
820 | if (unlikely(!data[thr].cmp_len || | 817 | if (unlikely(!data[thr].cmp_len || |
821 | data[thr].cmp_len > | 818 | data[thr].cmp_len > |
822 | lzo1x_worst_compress(data[thr].unc_len))) { | 819 | lzo1x_worst_compress(data[thr].unc_len))) { |
823 | printk(KERN_ERR | 820 | pr_err("Invalid LZO compressed length\n"); |
824 | "PM: Invalid LZO compressed length\n"); | ||
825 | ret = -1; | 821 | ret = -1; |
826 | goto out_finish; | 822 | goto out_finish; |
827 | } | 823 | } |
@@ -857,7 +853,7 @@ out_finish: | |||
857 | if (!ret) | 853 | if (!ret) |
858 | ret = err2; | 854 | ret = err2; |
859 | if (!ret) | 855 | if (!ret) |
860 | printk(KERN_INFO "PM: Image saving done.\n"); | 856 | pr_info("Image saving done\n"); |
861 | swsusp_show_speed(start, stop, nr_to_write, "Wrote"); | 857 | swsusp_show_speed(start, stop, nr_to_write, "Wrote"); |
862 | out_clean: | 858 | out_clean: |
863 | if (crc) { | 859 | if (crc) { |
@@ -888,7 +884,7 @@ static int enough_swap(unsigned int nr_pages, unsigned int flags) | |||
888 | unsigned int free_swap = count_swap_pages(root_swap, 1); | 884 | unsigned int free_swap = count_swap_pages(root_swap, 1); |
889 | unsigned int required; | 885 | unsigned int required; |
890 | 886 | ||
891 | pr_debug("PM: Free swap pages: %u\n", free_swap); | 887 | pr_debug("Free swap pages: %u\n", free_swap); |
892 | 888 | ||
893 | required = PAGES_FOR_IO + nr_pages; | 889 | required = PAGES_FOR_IO + nr_pages; |
894 | return free_swap > required; | 890 | return free_swap > required; |
@@ -915,12 +911,12 @@ int swsusp_write(unsigned int flags) | |||
915 | pages = snapshot_get_image_size(); | 911 | pages = snapshot_get_image_size(); |
916 | error = get_swap_writer(&handle); | 912 | error = get_swap_writer(&handle); |
917 | if (error) { | 913 | if (error) { |
918 | printk(KERN_ERR "PM: Cannot get swap writer\n"); | 914 | pr_err("Cannot get swap writer\n"); |
919 | return error; | 915 | return error; |
920 | } | 916 | } |
921 | if (flags & SF_NOCOMPRESS_MODE) { | 917 | if (flags & SF_NOCOMPRESS_MODE) { |
922 | if (!enough_swap(pages, flags)) { | 918 | if (!enough_swap(pages, flags)) { |
923 | printk(KERN_ERR "PM: Not enough free swap\n"); | 919 | pr_err("Not enough free swap\n"); |
924 | error = -ENOSPC; | 920 | error = -ENOSPC; |
925 | goto out_finish; | 921 | goto out_finish; |
926 | } | 922 | } |
@@ -1068,8 +1064,7 @@ static int load_image(struct swap_map_handle *handle, | |||
1068 | hib_init_batch(&hb); | 1064 | hib_init_batch(&hb); |
1069 | 1065 | ||
1070 | clean_pages_on_read = true; | 1066 | clean_pages_on_read = true; |
1071 | printk(KERN_INFO "PM: Loading image data pages (%u pages)...\n", | 1067 | pr_info("Loading image data pages (%u pages)...\n", nr_to_read); |
1072 | nr_to_read); | ||
1073 | m = nr_to_read / 10; | 1068 | m = nr_to_read / 10; |
1074 | if (!m) | 1069 | if (!m) |
1075 | m = 1; | 1070 | m = 1; |
@@ -1087,8 +1082,8 @@ static int load_image(struct swap_map_handle *handle, | |||
1087 | if (ret) | 1082 | if (ret) |
1088 | break; | 1083 | break; |
1089 | if (!(nr_pages % m)) | 1084 | if (!(nr_pages % m)) |
1090 | printk(KERN_INFO "PM: Image loading progress: %3d%%\n", | 1085 | pr_info("Image loading progress: %3d%%\n", |
1091 | nr_pages / m * 10); | 1086 | nr_pages / m * 10); |
1092 | nr_pages++; | 1087 | nr_pages++; |
1093 | } | 1088 | } |
1094 | err2 = hib_wait_io(&hb); | 1089 | err2 = hib_wait_io(&hb); |
@@ -1096,7 +1091,7 @@ static int load_image(struct swap_map_handle *handle, | |||
1096 | if (!ret) | 1091 | if (!ret) |
1097 | ret = err2; | 1092 | ret = err2; |
1098 | if (!ret) { | 1093 | if (!ret) { |
1099 | printk(KERN_INFO "PM: Image loading done.\n"); | 1094 | pr_info("Image loading done\n"); |
1100 | snapshot_write_finalize(snapshot); | 1095 | snapshot_write_finalize(snapshot); |
1101 | if (!snapshot_image_loaded(snapshot)) | 1096 | if (!snapshot_image_loaded(snapshot)) |
1102 | ret = -ENODATA; | 1097 | ret = -ENODATA; |
@@ -1190,14 +1185,14 @@ static int load_image_lzo(struct swap_map_handle *handle, | |||
1190 | 1185 | ||
1191 | page = vmalloc(sizeof(*page) * LZO_MAX_RD_PAGES); | 1186 | page = vmalloc(sizeof(*page) * LZO_MAX_RD_PAGES); |
1192 | if (!page) { | 1187 | if (!page) { |
1193 | printk(KERN_ERR "PM: Failed to allocate LZO page\n"); | 1188 | pr_err("Failed to allocate LZO page\n"); |
1194 | ret = -ENOMEM; | 1189 | ret = -ENOMEM; |
1195 | goto out_clean; | 1190 | goto out_clean; |
1196 | } | 1191 | } |
1197 | 1192 | ||
1198 | data = vmalloc(sizeof(*data) * nr_threads); | 1193 | data = vmalloc(sizeof(*data) * nr_threads); |
1199 | if (!data) { | 1194 | if (!data) { |
1200 | printk(KERN_ERR "PM: Failed to allocate LZO data\n"); | 1195 | pr_err("Failed to allocate LZO data\n"); |
1201 | ret = -ENOMEM; | 1196 | ret = -ENOMEM; |
1202 | goto out_clean; | 1197 | goto out_clean; |
1203 | } | 1198 | } |
@@ -1206,7 +1201,7 @@ static int load_image_lzo(struct swap_map_handle *handle, | |||
1206 | 1201 | ||
1207 | crc = kmalloc(sizeof(*crc), GFP_KERNEL); | 1202 | crc = kmalloc(sizeof(*crc), GFP_KERNEL); |
1208 | if (!crc) { | 1203 | if (!crc) { |
1209 | printk(KERN_ERR "PM: Failed to allocate crc\n"); | 1204 | pr_err("Failed to allocate crc\n"); |
1210 | ret = -ENOMEM; | 1205 | ret = -ENOMEM; |
1211 | goto out_clean; | 1206 | goto out_clean; |
1212 | } | 1207 | } |
@@ -1226,8 +1221,7 @@ static int load_image_lzo(struct swap_map_handle *handle, | |||
1226 | "image_decompress/%u", thr); | 1221 | "image_decompress/%u", thr); |
1227 | if (IS_ERR(data[thr].thr)) { | 1222 | if (IS_ERR(data[thr].thr)) { |
1228 | data[thr].thr = NULL; | 1223 | data[thr].thr = NULL; |
1229 | printk(KERN_ERR | 1224 | pr_err("Cannot start decompression threads\n"); |
1230 | "PM: Cannot start decompression threads\n"); | ||
1231 | ret = -ENOMEM; | 1225 | ret = -ENOMEM; |
1232 | goto out_clean; | 1226 | goto out_clean; |
1233 | } | 1227 | } |
@@ -1249,7 +1243,7 @@ static int load_image_lzo(struct swap_map_handle *handle, | |||
1249 | crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32"); | 1243 | crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32"); |
1250 | if (IS_ERR(crc->thr)) { | 1244 | if (IS_ERR(crc->thr)) { |
1251 | crc->thr = NULL; | 1245 | crc->thr = NULL; |
1252 | printk(KERN_ERR "PM: Cannot start CRC32 thread\n"); | 1246 | pr_err("Cannot start CRC32 thread\n"); |
1253 | ret = -ENOMEM; | 1247 | ret = -ENOMEM; |
1254 | goto out_clean; | 1248 | goto out_clean; |
1255 | } | 1249 | } |
@@ -1274,8 +1268,7 @@ static int load_image_lzo(struct swap_map_handle *handle, | |||
1274 | if (!page[i]) { | 1268 | if (!page[i]) { |
1275 | if (i < LZO_CMP_PAGES) { | 1269 | if (i < LZO_CMP_PAGES) { |
1276 | ring_size = i; | 1270 | ring_size = i; |
1277 | printk(KERN_ERR | 1271 | pr_err("Failed to allocate LZO pages\n"); |
1278 | "PM: Failed to allocate LZO pages\n"); | ||
1279 | ret = -ENOMEM; | 1272 | ret = -ENOMEM; |
1280 | goto out_clean; | 1273 | goto out_clean; |
1281 | } else { | 1274 | } else { |
@@ -1285,10 +1278,9 @@ static int load_image_lzo(struct swap_map_handle *handle, | |||
1285 | } | 1278 | } |
1286 | want = ring_size = i; | 1279 | want = ring_size = i; |
1287 | 1280 | ||
1288 | printk(KERN_INFO | 1281 | pr_info("Using %u thread(s) for decompression\n", nr_threads); |
1289 | "PM: Using %u thread(s) for decompression.\n" | 1282 | pr_info("Loading and decompressing image data (%u pages)...\n", |
1290 | "PM: Loading and decompressing image data (%u pages)...\n", | 1283 | nr_to_read); |
1291 | nr_threads, nr_to_read); | ||
1292 | m = nr_to_read / 10; | 1284 | m = nr_to_read / 10; |
1293 | if (!m) | 1285 | if (!m) |
1294 | m = 1; | 1286 | m = 1; |
@@ -1348,8 +1340,7 @@ static int load_image_lzo(struct swap_map_handle *handle, | |||
1348 | if (unlikely(!data[thr].cmp_len || | 1340 | if (unlikely(!data[thr].cmp_len || |
1349 | data[thr].cmp_len > | 1341 | data[thr].cmp_len > |
1350 | lzo1x_worst_compress(LZO_UNC_SIZE))) { | 1342 | lzo1x_worst_compress(LZO_UNC_SIZE))) { |
1351 | printk(KERN_ERR | 1343 | pr_err("Invalid LZO compressed length\n"); |
1352 | "PM: Invalid LZO compressed length\n"); | ||
1353 | ret = -1; | 1344 | ret = -1; |
1354 | goto out_finish; | 1345 | goto out_finish; |
1355 | } | 1346 | } |
@@ -1400,16 +1391,14 @@ static int load_image_lzo(struct swap_map_handle *handle, | |||
1400 | ret = data[thr].ret; | 1391 | ret = data[thr].ret; |
1401 | 1392 | ||
1402 | if (ret < 0) { | 1393 | if (ret < 0) { |
1403 | printk(KERN_ERR | 1394 | pr_err("LZO decompression failed\n"); |
1404 | "PM: LZO decompression failed\n"); | ||
1405 | goto out_finish; | 1395 | goto out_finish; |
1406 | } | 1396 | } |
1407 | 1397 | ||
1408 | if (unlikely(!data[thr].unc_len || | 1398 | if (unlikely(!data[thr].unc_len || |
1409 | data[thr].unc_len > LZO_UNC_SIZE || | 1399 | data[thr].unc_len > LZO_UNC_SIZE || |
1410 | data[thr].unc_len & (PAGE_SIZE - 1))) { | 1400 | data[thr].unc_len & (PAGE_SIZE - 1))) { |
1411 | printk(KERN_ERR | 1401 | pr_err("Invalid LZO uncompressed length\n"); |
1412 | "PM: Invalid LZO uncompressed length\n"); | ||
1413 | ret = -1; | 1402 | ret = -1; |
1414 | goto out_finish; | 1403 | goto out_finish; |
1415 | } | 1404 | } |
@@ -1420,10 +1409,8 @@ static int load_image_lzo(struct swap_map_handle *handle, | |||
1420 | data[thr].unc + off, PAGE_SIZE); | 1409 | data[thr].unc + off, PAGE_SIZE); |
1421 | 1410 | ||
1422 | if (!(nr_pages % m)) | 1411 | if (!(nr_pages % m)) |
1423 | printk(KERN_INFO | 1412 | pr_info("Image loading progress: %3d%%\n", |
1424 | "PM: Image loading progress: " | 1413 | nr_pages / m * 10); |
1425 | "%3d%%\n", | ||
1426 | nr_pages / m * 10); | ||
1427 | nr_pages++; | 1414 | nr_pages++; |
1428 | 1415 | ||
1429 | ret = snapshot_write_next(snapshot); | 1416 | ret = snapshot_write_next(snapshot); |
@@ -1448,15 +1435,14 @@ out_finish: | |||
1448 | } | 1435 | } |
1449 | stop = ktime_get(); | 1436 | stop = ktime_get(); |
1450 | if (!ret) { | 1437 | if (!ret) { |
1451 | printk(KERN_INFO "PM: Image loading done.\n"); | 1438 | pr_info("Image loading done\n"); |
1452 | snapshot_write_finalize(snapshot); | 1439 | snapshot_write_finalize(snapshot); |
1453 | if (!snapshot_image_loaded(snapshot)) | 1440 | if (!snapshot_image_loaded(snapshot)) |
1454 | ret = -ENODATA; | 1441 | ret = -ENODATA; |
1455 | if (!ret) { | 1442 | if (!ret) { |
1456 | if (swsusp_header->flags & SF_CRC32_MODE) { | 1443 | if (swsusp_header->flags & SF_CRC32_MODE) { |
1457 | if(handle->crc32 != swsusp_header->crc32) { | 1444 | if(handle->crc32 != swsusp_header->crc32) { |
1458 | printk(KERN_ERR | 1445 | pr_err("Invalid image CRC32!\n"); |
1459 | "PM: Invalid image CRC32!\n"); | ||
1460 | ret = -ENODATA; | 1446 | ret = -ENODATA; |
1461 | } | 1447 | } |
1462 | } | 1448 | } |
@@ -1513,9 +1499,9 @@ int swsusp_read(unsigned int *flags_p) | |||
1513 | swap_reader_finish(&handle); | 1499 | swap_reader_finish(&handle); |
1514 | end: | 1500 | end: |
1515 | if (!error) | 1501 | if (!error) |
1516 | pr_debug("PM: Image successfully loaded\n"); | 1502 | pr_debug("Image successfully loaded\n"); |
1517 | else | 1503 | else |
1518 | pr_debug("PM: Error %d resuming\n", error); | 1504 | pr_debug("Error %d resuming\n", error); |
1519 | return error; | 1505 | return error; |
1520 | } | 1506 | } |
1521 | 1507 | ||
@@ -1552,13 +1538,13 @@ put: | |||
1552 | if (error) | 1538 | if (error) |
1553 | blkdev_put(hib_resume_bdev, FMODE_READ); | 1539 | blkdev_put(hib_resume_bdev, FMODE_READ); |
1554 | else | 1540 | else |
1555 | pr_debug("PM: Image signature found, resuming\n"); | 1541 | pr_debug("Image signature found, resuming\n"); |
1556 | } else { | 1542 | } else { |
1557 | error = PTR_ERR(hib_resume_bdev); | 1543 | error = PTR_ERR(hib_resume_bdev); |
1558 | } | 1544 | } |
1559 | 1545 | ||
1560 | if (error) | 1546 | if (error) |
1561 | pr_debug("PM: Image not found (code %d)\n", error); | 1547 | pr_debug("Image not found (code %d)\n", error); |
1562 | 1548 | ||
1563 | return error; | 1549 | return error; |
1564 | } | 1550 | } |
@@ -1570,7 +1556,7 @@ put: | |||
1570 | void swsusp_close(fmode_t mode) | 1556 | void swsusp_close(fmode_t mode) |
1571 | { | 1557 | { |
1572 | if (IS_ERR(hib_resume_bdev)) { | 1558 | if (IS_ERR(hib_resume_bdev)) { |
1573 | pr_debug("PM: Image device not initialised\n"); | 1559 | pr_debug("Image device not initialised\n"); |
1574 | return; | 1560 | return; |
1575 | } | 1561 | } |
1576 | 1562 | ||
@@ -1594,7 +1580,7 @@ int swsusp_unmark(void) | |||
1594 | swsusp_resume_block, | 1580 | swsusp_resume_block, |
1595 | swsusp_header, NULL); | 1581 | swsusp_header, NULL); |
1596 | } else { | 1582 | } else { |
1597 | printk(KERN_ERR "PM: Cannot find swsusp signature!\n"); | 1583 | pr_err("Cannot find swsusp signature!\n"); |
1598 | error = -ENODEV; | 1584 | error = -ENODEV; |
1599 | } | 1585 | } |
1600 | 1586 | ||
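One subtlety in the swap.c conversion above (see the swap_writer_finish() hunk): ``pr_cont()`` does not apply ``pr_fmt()``, so a continuation keeps extending the line started by the previous ``pr_info()`` rather than emitting a second "PM: " prefix — which matches what the old ``printk(KERN_CONT ...)`` calls did. A tiny sketch of the pattern, with made-up output:

```c
#define pr_fmt(fmt) "PM: " fmt

#include <linux/printk.h>

void foo_announce(int error)
{
	pr_info("S");		/* starts the line: "PM: S" (no newline yet) */
	if (!error)
		pr_cont("|\n");	/* continues the same line: "PM: S|" */
	else
		pr_cont("\n");
}
```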