about summary refs log tree commit diff stats
path: root/kernel/power/swap.c
diff options
context:
space:
mode:
author: Bojan Smojver <bojan@rexursive.com> 2012-04-24 17:53:28 -0400
committer: Rafael J. Wysocki <rjw@sisk.pl> 2012-04-24 17:53:28 -0400
commit: f8262d476823a7ea1eb497ff9676d1eab2393c75 (patch)
tree: dfe76c8ef2b3fc700d9550b85b69e414ef415de6 /kernel/power/swap.c
parent: 66f75a5d028beaf67c931435fdc3e7823125730c (diff)
PM / Hibernate: fix the number of pages used for hibernate/thaw buffering
Hibernation regression fix, since 3.2.

Calculate the number of required free pages based on non-high memory
pages only, because that is where the buffers will come from.

Commit 081a9d043c983f161b78fdc4671324d1342b86bc introduced a new buffer
page allocation logic during hibernation, in order to improve the
performance. The amount of pages allocated was calculated based on the
total amount of pages available, although only non-high memory pages are
usable for this purpose. This caused hibernation code to attempt to over
allocate pages on platforms that have high memory, which led to hangs.

Signed-off-by: Bojan Smojver <bojan@rexursive.com>
Signed-off-by: Rafael J. Wysocki <rjw@suse.de>
Diffstat (limited to 'kernel/power/swap.c')
-rw-r--r--kernel/power/swap.c28
1 file changed, 22 insertions(+), 6 deletions(-)
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 8742fd013a9..eef311a58a6 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -51,6 +51,23 @@
 
 #define MAP_PAGE_ENTRIES	(PAGE_SIZE / sizeof(sector_t) - 1)
 
+/*
+ * Number of free pages that are not high.
+ */
+static inline unsigned long low_free_pages(void)
+{
+	return nr_free_pages() - nr_free_highpages();
+}
+
+/*
+ * Number of pages required to be kept free while writing the image. Always
+ * half of all available low pages before the writing starts.
+ */
+static inline unsigned long reqd_free_pages(void)
+{
+	return low_free_pages() / 2;
+}
+
 struct swap_map_page {
 	sector_t entries[MAP_PAGE_ENTRIES];
 	sector_t next_swap;
@@ -72,7 +89,7 @@ struct swap_map_handle {
 	sector_t cur_swap;
 	sector_t first_sector;
 	unsigned int k;
-	unsigned long nr_free_pages, written;
+	unsigned long reqd_free_pages;
 	u32 crc32;
 };
 
@@ -316,8 +333,7 @@ static int get_swap_writer(struct swap_map_handle *handle)
 		goto err_rel;
 	}
 	handle->k = 0;
-	handle->nr_free_pages = nr_free_pages() >> 1;
-	handle->written = 0;
+	handle->reqd_free_pages = reqd_free_pages();
 	handle->first_sector = handle->cur_swap;
 	return 0;
 err_rel:
@@ -352,11 +368,11 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf,
 		handle->cur_swap = offset;
 		handle->k = 0;
 	}
-	if (bio_chain && ++handle->written > handle->nr_free_pages) {
+	if (bio_chain && low_free_pages() <= handle->reqd_free_pages) {
 		error = hib_wait_on_bio_chain(bio_chain);
 		if (error)
 			goto out;
-		handle->written = 0;
+		handle->reqd_free_pages = reqd_free_pages();
 	}
 out:
 	return error;
@@ -618,7 +634,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
 	 * Adjust number of free pages after all allocations have been done.
 	 * We don't want to run out of pages when writing.
 	 */
-	handle->nr_free_pages = nr_free_pages() >> 1;
+	handle->reqd_free_pages = reqd_free_pages();
 
 	/*
 	 * Start the CRC32 thread.