author    Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2016-07-08 17:14:17 -0400
committer Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2016-07-08 17:14:17 -0400
commit    63f9ccb89552f25fa8cac57a6796479ca7eb527d (patch)
tree      6b8286f4d70ccf4f2a5b65764af7887836d02bf1 /kernel/power
parent    65c0554b73c920023cc8998802e508b798113b46 (diff)
parent    307c5971c972ef2bfd541d2850b36a692c6354c9 (diff)
Merge back earlier suspend/hibernation changes for v4.8.
Diffstat (limited to 'kernel/power')
-rw-r--r--  kernel/power/Makefile      2
-rw-r--r--  kernel/power/console.c     8
-rw-r--r--  kernel/power/hibernate.c  20
-rw-r--r--  kernel/power/main.c       11
-rw-r--r--  kernel/power/power.h       2
-rw-r--r--  kernel/power/process.c     3
-rw-r--r--  kernel/power/snapshot.c  170
-rw-r--r--  kernel/power/suspend.c    10
-rw-r--r--  kernel/power/user.c       14
9 files changed, 147 insertions(+), 93 deletions(-)
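The recurring change across hibernate.c, suspend.c and user.c below is the switch from pm_notifier_call_chain() to __pm_notifier_call_chain(), which records how many notifiers actually ran so that, on failure, only those notifiers receive the matching POST event. The following is a simplified sketch of that call pattern, not the actual suspend_prepare() body; prepare_something() is a hypothetical stand-in for the real freeze/snapshot work.

```c
/*
 * Sketch of the nr_calls error-handling pattern used by this merge.
 * prepare_something() is hypothetical; the notifier helpers match the
 * ones added in kernel/power/main.c and kernel/power/power.h.
 */
static int example_prepare(void)
{
	int error, nr_calls = 0;

	/* Run PREPARE notifiers, counting how many were actually called. */
	error = __pm_notifier_call_chain(PM_SUSPEND_PREPARE, -1, &nr_calls);
	if (error) {
		nr_calls--;	/* do not re-notify the callback that failed */
		goto Finish;
	}

	error = prepare_something();	/* hypothetical follow-up step */

 Finish:
	/* Only the notifiers that saw PREPARE get the POST event. */
	__pm_notifier_call_chain(PM_POST_SUSPEND, nr_calls, NULL);
	return error;
}
```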
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index cb880a14cc39..eb4f717705ba 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -1,6 +1,8 @@
 
 ccflags-$(CONFIG_PM_DEBUG) := -DDEBUG
 
+KASAN_SANITIZE_snapshot.o := n
+
 obj-y += qos.o
 obj-$(CONFIG_PM) += main.o
 obj-$(CONFIG_VT_CONSOLE_SLEEP) += console.o
diff --git a/kernel/power/console.c b/kernel/power/console.c
index aba9c545a0e3..0e781798b0b3 100644
--- a/kernel/power/console.c
+++ b/kernel/power/console.c
@@ -126,17 +126,17 @@ out:
 	return ret;
 }
 
-int pm_prepare_console(void)
+void pm_prepare_console(void)
 {
 	if (!pm_vt_switch())
-		return 0;
+		return;
 
 	orig_fgconsole = vt_move_to_console(SUSPEND_CONSOLE, 1);
 	if (orig_fgconsole < 0)
-		return 1;
+		return;
 
 	orig_kmsg = vt_kmsg_redirect(SUSPEND_CONSOLE);
-	return 0;
+	return;
 }
 
 void pm_restore_console(void)
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index fca9254280ee..126e24caa82e 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -647,7 +647,7 @@ static void power_down(void)
  */
 int hibernate(void)
 {
-	int error;
+	int error, nr_calls = 0;
 
 	if (!hibernation_available()) {
 		pr_debug("PM: Hibernation not available.\n");
@@ -662,9 +662,11 @@ int hibernate(void)
 	}
 
 	pm_prepare_console();
-	error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE);
-	if (error)
+	error = __pm_notifier_call_chain(PM_HIBERNATION_PREPARE, -1, &nr_calls);
+	if (error) {
+		nr_calls--;
 		goto Exit;
+	}
 
 	printk(KERN_INFO "PM: Syncing filesystems ... ");
 	sys_sync();
@@ -714,7 +716,7 @@ int hibernate(void)
 	/* Don't bother checking whether freezer_test_done is true */
 	freezer_test_done = false;
  Exit:
-	pm_notifier_call_chain(PM_POST_HIBERNATION);
+	__pm_notifier_call_chain(PM_POST_HIBERNATION, nr_calls, NULL);
 	pm_restore_console();
 	atomic_inc(&snapshot_device_available);
  Unlock:
@@ -740,7 +742,7 @@ int hibernate(void)
  */
 static int software_resume(void)
 {
-	int error;
+	int error, nr_calls = 0;
 	unsigned int flags;
 
 	/*
@@ -827,9 +829,11 @@ static int software_resume(void)
 	}
 
 	pm_prepare_console();
-	error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
-	if (error)
+	error = __pm_notifier_call_chain(PM_RESTORE_PREPARE, -1, &nr_calls);
+	if (error) {
+		nr_calls--;
 		goto Close_Finish;
+	}
 
 	pr_debug("PM: Preparing processes for restore.\n");
 	error = freeze_processes();
@@ -855,7 +859,7 @@ static int software_resume(void)
 	unlock_device_hotplug();
 	thaw_processes();
  Finish:
-	pm_notifier_call_chain(PM_POST_RESTORE);
+	__pm_notifier_call_chain(PM_POST_RESTORE, nr_calls, NULL);
 	pm_restore_console();
 	atomic_inc(&snapshot_device_available);
 	/* For success case, the suspend path will release the lock */
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 27946975eff0..5ea50b1b7595 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -38,12 +38,19 @@ int unregister_pm_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL_GPL(unregister_pm_notifier);
 
-int pm_notifier_call_chain(unsigned long val)
+int __pm_notifier_call_chain(unsigned long val, int nr_to_call, int *nr_calls)
 {
-	int ret = blocking_notifier_call_chain(&pm_chain_head, val, NULL);
+	int ret;
+
+	ret = __blocking_notifier_call_chain(&pm_chain_head, val, NULL,
+					     nr_to_call, nr_calls);
 
 	return notifier_to_errno(ret);
 }
+int pm_notifier_call_chain(unsigned long val)
+{
+	return __pm_notifier_call_chain(val, -1, NULL);
+}
 
 /* If set, devices may be suspended and resumed asynchronously. */
 int pm_async_enabled = 1;
diff --git a/kernel/power/power.h b/kernel/power/power.h
index efe1b3b17c88..51f02ecaf125 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -200,6 +200,8 @@ static inline void suspend_test_finish(const char *label) {}
 
 #ifdef CONFIG_PM_SLEEP
 /* kernel/power/main.c */
+extern int __pm_notifier_call_chain(unsigned long val, int nr_to_call,
+				    int *nr_calls);
 extern int pm_notifier_call_chain(unsigned long val);
 #endif
 
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 0c2ee9761d57..8f27d5a8adf6 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -89,6 +89,9 @@ static int try_to_freeze_tasks(bool user_only)
 		       elapsed_msecs / 1000, elapsed_msecs % 1000,
 		       todo - wq_busy, wq_busy);
 
+		if (wq_busy)
+			show_workqueue_state();
+
 		if (!wakeup) {
 			read_lock(&tasklist_lock);
 			for_each_process_thread(g, p) {
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 3a970604308f..94b6fe6c9ae3 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -74,6 +74,22 @@ void __init hibernate_image_size_init(void)
  */
 struct pbe *restore_pblist;
 
+/* struct linked_page is used to build chains of pages */
+
+#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))
+
+struct linked_page {
+	struct linked_page *next;
+	char data[LINKED_PAGE_DATA_SIZE];
+} __packed;
+
+/*
+ * List of "safe" pages (ie. pages that were not used by the image kernel
+ * before hibernation) that may be used as temporary storage for image kernel
+ * memory contents.
+ */
+static struct linked_page *safe_pages_list;
+
 /* Pointer to an auxiliary buffer (1 page) */
 static void *buffer;
 
@@ -113,9 +129,21 @@ static void *get_image_page(gfp_t gfp_mask, int safe_needed)
 	return res;
 }
 
+static void *__get_safe_page(gfp_t gfp_mask)
+{
+	if (safe_pages_list) {
+		void *ret = safe_pages_list;
+
+		safe_pages_list = safe_pages_list->next;
+		memset(ret, 0, PAGE_SIZE);
+		return ret;
+	}
+	return get_image_page(gfp_mask, PG_SAFE);
+}
+
 unsigned long get_safe_page(gfp_t gfp_mask)
 {
-	return (unsigned long)get_image_page(gfp_mask, PG_SAFE);
+	return (unsigned long)__get_safe_page(gfp_mask);
 }
 
 static struct page *alloc_image_page(gfp_t gfp_mask)
@@ -130,6 +158,14 @@ static struct page *alloc_image_page(gfp_t gfp_mask)
 	return page;
 }
 
+static void recycle_safe_page(void *page_address)
+{
+	struct linked_page *lp = page_address;
+
+	lp->next = safe_pages_list;
+	safe_pages_list = lp;
+}
+
 /**
  * free_image_page - free page represented by @addr, allocated with
  * get_image_page (page flags set by it must be cleared)
@@ -150,15 +186,6 @@ static inline void free_image_page(void *addr, int clear_nosave_free)
 	__free_page(page);
 }
 
-/* struct linked_page is used to build chains of pages */
-
-#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))
-
-struct linked_page {
-	struct linked_page *next;
-	char data[LINKED_PAGE_DATA_SIZE];
-} __packed;
-
 static inline void
 free_list_of_pages(struct linked_page *list, int clear_page_nosave)
 {
@@ -208,7 +235,8 @@ static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
 	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
 		struct linked_page *lp;
 
-		lp = get_image_page(ca->gfp_mask, ca->safe_needed);
+		lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
+					get_image_page(ca->gfp_mask, PG_ANY);
 		if (!lp)
 			return NULL;
 
@@ -832,6 +860,34 @@ struct nosave_region {
 
 static LIST_HEAD(nosave_regions);
 
+static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
+{
+	struct rtree_node *node;
+
+	list_for_each_entry(node, &zone->nodes, list)
+		recycle_safe_page(node->data);
+
+	list_for_each_entry(node, &zone->leaves, list)
+		recycle_safe_page(node->data);
+}
+
+static void memory_bm_recycle(struct memory_bitmap *bm)
+{
+	struct mem_zone_bm_rtree *zone;
+	struct linked_page *p_list;
+
+	list_for_each_entry(zone, &bm->zones, list)
+		recycle_zone_bm_rtree(zone);
+
+	p_list = bm->p_list;
+	while (p_list) {
+		struct linked_page *lp = p_list;
+
+		p_list = lp->next;
+		recycle_safe_page(lp);
+	}
+}
+
 /**
  * register_nosave_region - register a range of page frames the contents
  * of which should not be saved during the suspend (to be used in the early
@@ -1999,53 +2055,41 @@ int snapshot_read_next(struct snapshot_handle *handle)
 	return PAGE_SIZE;
 }
 
+static void duplicate_memory_bitmap(struct memory_bitmap *dst,
+				    struct memory_bitmap *src)
+{
+	unsigned long pfn;
+
+	memory_bm_position_reset(src);
+	pfn = memory_bm_next_pfn(src);
+	while (pfn != BM_END_OF_MAP) {
+		memory_bm_set_bit(dst, pfn);
+		pfn = memory_bm_next_pfn(src);
+	}
+}
+
 /**
  * mark_unsafe_pages - mark the pages that cannot be used for storing
  * the image during resume, because they conflict with the pages that
  * had been used before suspend
  */
 
-static int mark_unsafe_pages(struct memory_bitmap *bm)
+static void mark_unsafe_pages(struct memory_bitmap *bm)
 {
-	struct zone *zone;
-	unsigned long pfn, max_zone_pfn;
+	unsigned long pfn;
 
-	/* Clear page flags */
-	for_each_populated_zone(zone) {
-		max_zone_pfn = zone_end_pfn(zone);
-		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
-			if (pfn_valid(pfn))
-				swsusp_unset_page_free(pfn_to_page(pfn));
-	}
+	/* Clear the "free"/"unsafe" bit for all PFNs */
+	memory_bm_position_reset(free_pages_map);
+	pfn = memory_bm_next_pfn(free_pages_map);
+	while (pfn != BM_END_OF_MAP) {
+		memory_bm_clear_current(free_pages_map);
+		pfn = memory_bm_next_pfn(free_pages_map);
+	}
 
-	/* Mark pages that correspond to the "original" pfns as "unsafe" */
-	memory_bm_position_reset(bm);
-	do {
-		pfn = memory_bm_next_pfn(bm);
-		if (likely(pfn != BM_END_OF_MAP)) {
-			if (likely(pfn_valid(pfn)))
-				swsusp_set_page_free(pfn_to_page(pfn));
-			else
-				return -EFAULT;
-		}
-	} while (pfn != BM_END_OF_MAP);
+	/* Mark pages that correspond to the "original" PFNs as "unsafe" */
+	duplicate_memory_bitmap(free_pages_map, bm);
 
 	allocated_unsafe_pages = 0;
-
-	return 0;
-}
-
-static void
-duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src)
-{
-	unsigned long pfn;
-
-	memory_bm_position_reset(src);
-	pfn = memory_bm_next_pfn(src);
-	while (pfn != BM_END_OF_MAP) {
-		memory_bm_set_bit(dst, pfn);
-		pfn = memory_bm_next_pfn(src);
-	}
 }
 
 static int check_header(struct swsusp_info *info)
@@ -2095,7 +2139,7 @@ static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
 		/* Extract and buffer page key for data page (s390 only). */
 		page_key_memorize(buf + j);
 
-		if (memory_bm_pfn_present(bm, buf[j]))
+		if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
 			memory_bm_set_bit(bm, buf[j]);
 		else
 			return -EFAULT;
@@ -2104,11 +2148,6 @@ static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
 	return 0;
 }
 
-/* List of "safe" pages that may be used to store data loaded from the suspend
- * image
- */
-static struct linked_page *safe_pages_list;
-
 #ifdef CONFIG_HIGHMEM
 /* struct highmem_pbe is used for creating the list of highmem pages that
  * should be restored atomically during the resume from disk, because the page
@@ -2334,7 +2373,7 @@ static int
 prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
 {
 	unsigned int nr_pages, nr_highmem;
-	struct linked_page *sp_list, *lp;
+	struct linked_page *lp;
 	int error;
 
 	/* If there is no highmem, the buffer will not be necessary */
@@ -2342,9 +2381,7 @@ prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
 	buffer = NULL;
 
 	nr_highmem = count_highmem_image_pages(bm);
-	error = mark_unsafe_pages(bm);
-	if (error)
-		goto Free;
+	mark_unsafe_pages(bm);
 
 	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
 	if (error)
@@ -2362,9 +2399,9 @@ prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
 	 * NOTE: This way we make sure there will be enough safe pages for the
 	 * chain_alloc() in get_buffer().  It is a bit wasteful, but
 	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
+	 *
+	 * nr_copy_pages cannot be less than allocated_unsafe_pages too.
 	 */
-	sp_list = NULL;
-	/* nr_copy_pages cannot be lesser than allocated_unsafe_pages */
 	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
 	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
 	while (nr_pages > 0) {
@@ -2373,12 +2410,11 @@ prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
 			error = -ENOMEM;
 			goto Free;
 		}
-		lp->next = sp_list;
-		sp_list = lp;
+		lp->next = safe_pages_list;
+		safe_pages_list = lp;
 		nr_pages--;
 	}
 	/* Preallocate memory for the image */
-	safe_pages_list = NULL;
 	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
 	while (nr_pages > 0) {
 		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
@@ -2396,12 +2432,6 @@ prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
 		swsusp_set_page_free(virt_to_page(lp));
 		nr_pages--;
 	}
-	/* Free the reserved safe pages so that chain_alloc() can use them */
-	while (sp_list) {
-		lp = sp_list->next;
-		free_image_page(sp_list, PG_UNSAFE_CLEAR);
-		sp_list = lp;
-	}
 	return 0;
 
  Free:
@@ -2491,6 +2521,8 @@ int snapshot_write_next(struct snapshot_handle *handle)
 		if (error)
 			return error;
 
+		safe_pages_list = NULL;
+
 		error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
 		if (error)
 			return error;
@@ -2546,9 +2578,9 @@ void snapshot_write_finalize(struct snapshot_handle *handle)
 	/* Restore page key for data page (s390 only). */
 	page_key_write(handle->buffer);
 	page_key_free();
-	/* Free only if we have loaded the image entirely */
+	/* Do that only if we have loaded the image entirely */
 	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
-		memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
+		memory_bm_recycle(&orig_bm);
 		free_highmem_data();
 	}
 }
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 5b70d64b871e..0acab9d7f96f 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -266,16 +266,18 @@ static int suspend_test(int level)
  */
 static int suspend_prepare(suspend_state_t state)
 {
-	int error;
+	int error, nr_calls = 0;
 
 	if (!sleep_state_supported(state))
 		return -EPERM;
 
 	pm_prepare_console();
 
-	error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
-	if (error)
+	error = __pm_notifier_call_chain(PM_SUSPEND_PREPARE, -1, &nr_calls);
+	if (error) {
+		nr_calls--;
 		goto Finish;
+	}
 
 	trace_suspend_resume(TPS("freeze_processes"), 0, true);
 	error = suspend_freeze_processes();
@@ -286,7 +288,7 @@ static int suspend_prepare(suspend_state_t state)
 	suspend_stats.failed_freeze++;
 	dpm_save_failed_step(SUSPEND_FREEZE);
  Finish:
-	pm_notifier_call_chain(PM_POST_SUSPEND);
+	__pm_notifier_call_chain(PM_POST_SUSPEND, nr_calls, NULL);
 	pm_restore_console();
 	return error;
 }
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 526e8911460a..35310b627388 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -47,7 +47,7 @@ atomic_t snapshot_device_available = ATOMIC_INIT(1);
 static int snapshot_open(struct inode *inode, struct file *filp)
 {
 	struct snapshot_data *data;
-	int error;
+	int error, nr_calls = 0;
 
 	if (!hibernation_available())
 		return -EPERM;
@@ -74,9 +74,9 @@ static int snapshot_open(struct inode *inode, struct file *filp)
 			swap_type_of(swsusp_resume_device, 0, NULL) : -1;
 		data->mode = O_RDONLY;
 		data->free_bitmaps = false;
-		error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE);
+		error = __pm_notifier_call_chain(PM_HIBERNATION_PREPARE, -1, &nr_calls);
 		if (error)
-			pm_notifier_call_chain(PM_POST_HIBERNATION);
+			__pm_notifier_call_chain(PM_POST_HIBERNATION, --nr_calls, NULL);
 	} else {
 		/*
 		 * Resuming.  We may need to wait for the image device to
@@ -86,13 +86,15 @@ static int snapshot_open(struct inode *inode, struct file *filp)
 
 		data->swap = -1;
 		data->mode = O_WRONLY;
-		error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
+		error = __pm_notifier_call_chain(PM_RESTORE_PREPARE, -1, &nr_calls);
 		if (!error) {
 			error = create_basic_memory_bitmaps();
 			data->free_bitmaps = !error;
-		}
+		} else
+			nr_calls--;
+
 		if (error)
-			pm_notifier_call_chain(PM_POST_RESTORE);
+			__pm_notifier_call_chain(PM_POST_RESTORE, nr_calls, NULL);
 	}
 	if (error)
 		atomic_inc(&snapshot_device_available);