about summary refs log tree commit diff stats
path: root/kernel/power
diff options
context:
space:
mode:
authorRafael J. Wysocki <rafael.j.wysocki@intel.com>2016-06-28 21:00:51 -0400
committerRafael J. Wysocki <rafael.j.wysocki@intel.com>2016-07-01 19:52:09 -0400
commit9c744481c003697de453e8fc039468143ba604aa (patch)
treefee5b5bf27c2e4eded3a34486a7de4651af4ff92 /kernel/power
parent7b776af66dc462caa7e839cc5c950a61db1f8551 (diff)
PM / hibernate: Do not free preallocated safe pages during image restore
The core image restoration code preallocates some safe pages (i.e. pages that weren't used by the image kernel before hibernation) for future use before allocating the bulk of memory for loading the image data. Those safe pages are then freed so they can be allocated again (with the memory management subsystem's help). That's done to ensure that there will be enough safe pages for temporary data structures needed during image restoration. However, it is not really necessary to free those pages after they have been allocated. They can be added to the (global) list of safe pages right away and then picked up from there when needed without freeing. That reduces the overhead related to using safe pages, especially in the arch-specific code, so modify the code accordingly. Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Diffstat (limited to 'kernel/power')
-rw-r--r--kernel/power/snapshot.c66
1 file changed, 38 insertions(+), 28 deletions(-)
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 3a970604308f..d9476ff877b8 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -74,6 +74,22 @@ void __init hibernate_image_size_init(void)
74 */ 74 */
75struct pbe *restore_pblist; 75struct pbe *restore_pblist;
76 76
77/* struct linked_page is used to build chains of pages */
78
79#define LINKED_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(void *))
80
81struct linked_page {
82 struct linked_page *next;
83 char data[LINKED_PAGE_DATA_SIZE];
84} __packed;
85
86/*
87 * List of "safe" pages (ie. pages that were not used by the image kernel
88 * before hibernation) that may be used as temporary storage for image kernel
89 * memory contents.
90 */
91static struct linked_page *safe_pages_list;
92
77/* Pointer to an auxiliary buffer (1 page) */ 93/* Pointer to an auxiliary buffer (1 page) */
78static void *buffer; 94static void *buffer;
79 95
@@ -113,9 +129,21 @@ static void *get_image_page(gfp_t gfp_mask, int safe_needed)
113 return res; 129 return res;
114} 130}
115 131
132static void *__get_safe_page(gfp_t gfp_mask)
133{
134 if (safe_pages_list) {
135 void *ret = safe_pages_list;
136
137 safe_pages_list = safe_pages_list->next;
138 memset(ret, 0, PAGE_SIZE);
139 return ret;
140 }
141 return get_image_page(gfp_mask, PG_SAFE);
142}
143
116unsigned long get_safe_page(gfp_t gfp_mask) 144unsigned long get_safe_page(gfp_t gfp_mask)
117{ 145{
118 return (unsigned long)get_image_page(gfp_mask, PG_SAFE); 146 return (unsigned long)__get_safe_page(gfp_mask);
119} 147}
120 148
121static struct page *alloc_image_page(gfp_t gfp_mask) 149static struct page *alloc_image_page(gfp_t gfp_mask)
@@ -150,15 +178,6 @@ static inline void free_image_page(void *addr, int clear_nosave_free)
150 __free_page(page); 178 __free_page(page);
151} 179}
152 180
153/* struct linked_page is used to build chains of pages */
154
155#define LINKED_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(void *))
156
157struct linked_page {
158 struct linked_page *next;
159 char data[LINKED_PAGE_DATA_SIZE];
160} __packed;
161
162static inline void 181static inline void
163free_list_of_pages(struct linked_page *list, int clear_page_nosave) 182free_list_of_pages(struct linked_page *list, int clear_page_nosave)
164{ 183{
@@ -208,7 +227,8 @@ static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
208 if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) { 227 if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
209 struct linked_page *lp; 228 struct linked_page *lp;
210 229
211 lp = get_image_page(ca->gfp_mask, ca->safe_needed); 230 lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
231 get_image_page(ca->gfp_mask, PG_ANY);
212 if (!lp) 232 if (!lp)
213 return NULL; 233 return NULL;
214 234
@@ -2104,11 +2124,6 @@ static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
2104 return 0; 2124 return 0;
2105} 2125}
2106 2126
2107/* List of "safe" pages that may be used to store data loaded from the suspend
2108 * image
2109 */
2110static struct linked_page *safe_pages_list;
2111
2112#ifdef CONFIG_HIGHMEM 2127#ifdef CONFIG_HIGHMEM
2113/* struct highmem_pbe is used for creating the list of highmem pages that 2128/* struct highmem_pbe is used for creating the list of highmem pages that
2114 * should be restored atomically during the resume from disk, because the page 2129 * should be restored atomically during the resume from disk, because the page
@@ -2334,7 +2349,7 @@ static int
2334prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm) 2349prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
2335{ 2350{
2336 unsigned int nr_pages, nr_highmem; 2351 unsigned int nr_pages, nr_highmem;
2337 struct linked_page *sp_list, *lp; 2352 struct linked_page *lp;
2338 int error; 2353 int error;
2339 2354
2340 /* If there is no highmem, the buffer will not be necessary */ 2355 /* If there is no highmem, the buffer will not be necessary */
@@ -2362,9 +2377,9 @@ prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
2362 * NOTE: This way we make sure there will be enough safe pages for the 2377 * NOTE: This way we make sure there will be enough safe pages for the
2363 * chain_alloc() in get_buffer(). It is a bit wasteful, but 2378 * chain_alloc() in get_buffer(). It is a bit wasteful, but
2364 * nr_copy_pages cannot be greater than 50% of the memory anyway. 2379 * nr_copy_pages cannot be greater than 50% of the memory anyway.
2380 *
2381 * nr_copy_pages cannot be less than allocated_unsafe_pages too.
2365 */ 2382 */
2366 sp_list = NULL;
2367 /* nr_copy_pages cannot be lesser than allocated_unsafe_pages */
2368 nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages; 2383 nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2369 nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE); 2384 nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
2370 while (nr_pages > 0) { 2385 while (nr_pages > 0) {
@@ -2373,12 +2388,11 @@ prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
2373 error = -ENOMEM; 2388 error = -ENOMEM;
2374 goto Free; 2389 goto Free;
2375 } 2390 }
2376 lp->next = sp_list; 2391 lp->next = safe_pages_list;
2377 sp_list = lp; 2392 safe_pages_list = lp;
2378 nr_pages--; 2393 nr_pages--;
2379 } 2394 }
2380 /* Preallocate memory for the image */ 2395 /* Preallocate memory for the image */
2381 safe_pages_list = NULL;
2382 nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages; 2396 nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2383 while (nr_pages > 0) { 2397 while (nr_pages > 0) {
2384 lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC); 2398 lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
@@ -2396,12 +2410,6 @@ prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
2396 swsusp_set_page_free(virt_to_page(lp)); 2410 swsusp_set_page_free(virt_to_page(lp));
2397 nr_pages--; 2411 nr_pages--;
2398 } 2412 }
2399 /* Free the reserved safe pages so that chain_alloc() can use them */
2400 while (sp_list) {
2401 lp = sp_list->next;
2402 free_image_page(sp_list, PG_UNSAFE_CLEAR);
2403 sp_list = lp;
2404 }
2405 return 0; 2413 return 0;
2406 2414
2407 Free: 2415 Free:
@@ -2491,6 +2499,8 @@ int snapshot_write_next(struct snapshot_handle *handle)
2491 if (error) 2499 if (error)
2492 return error; 2500 return error;
2493 2501
2502 safe_pages_list = NULL;
2503
2494 error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY); 2504 error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
2495 if (error) 2505 if (error)
2496 return error; 2506 return error;