 kernel/power/swap.c | 62 +++++++++++++++++++++++++++++++-----------------------
 1 file changed, 39 insertions(+), 23 deletions(-)
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index eef311a58a64..11e22c068e8b 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -6,7 +6,7 @@
  *
  * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
  * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
- * Copyright (C) 2010 Bojan Smojver <bojan@rexursive.com>
+ * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
  *
  * This file is released under the GPLv2.
  *
@@ -282,14 +282,17 @@ static int write_page(void *buf, sector_t offset, struct bio **bio_chain)
 		return -ENOSPC;
 
 	if (bio_chain) {
-		src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
+		src = (void *)__get_free_page(__GFP_WAIT | __GFP_NOWARN |
+		                              __GFP_NORETRY);
 		if (src) {
 			copy_page(src, buf);
 		} else {
 			ret = hib_wait_on_bio_chain(bio_chain); /* Free pages */
 			if (ret)
 				return ret;
-			src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
+			src = (void *)__get_free_page(__GFP_WAIT |
+			                              __GFP_NOWARN |
+			                              __GFP_NORETRY);
 			if (src) {
 				copy_page(src, buf);
 			} else {
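
The allocation pattern this hunk introduces — try a quiet, give-up-early allocation, and only retry after waiting for in-flight writes to release their pages — is worth seeing on its own. Below is a minimal sketch of that pattern; wait_for_inflight_io() is a hypothetical stand-in for hib_wait_on_bio_chain(), everything else follows the hunk above.

#include <linux/gfp.h>
#include <linux/bio.h>

/* Hypothetical stand-in for hib_wait_on_bio_chain(). */
int wait_for_inflight_io(struct bio **bio_chain);

static void *get_buffer_page(struct bio **bio_chain)
{
	void *page;

	/*
	 * Opportunistic attempt: __GFP_NOWARN keeps a failure quiet and
	 * __GFP_NORETRY makes the allocator give up early, so failing
	 * here is cheap.
	 */
	page = (void *)__get_free_page(__GFP_WAIT | __GFP_NOWARN |
				       __GFP_NORETRY);
	if (page)
		return page;

	/* Waiting for in-flight writes to complete frees their pages. */
	if (wait_for_inflight_io(bio_chain))
		return NULL;

	return (void *)__get_free_page(__GFP_WAIT | __GFP_NOWARN |
				       __GFP_NORETRY);
}

Note that the hunk also drops __GFP_HIGH, so these copies no longer dip into the emergency reserves; an allocation failure is now quiet, quick and recoverable.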
@@ -367,12 +370,17 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf,
 		clear_page(handle->cur);
 		handle->cur_swap = offset;
 		handle->k = 0;
-	}
-	if (bio_chain && low_free_pages() <= handle->reqd_free_pages) {
-		error = hib_wait_on_bio_chain(bio_chain);
-		if (error)
-			goto out;
-		handle->reqd_free_pages = reqd_free_pages();
+
+		if (bio_chain && low_free_pages() <= handle->reqd_free_pages) {
+			error = hib_wait_on_bio_chain(bio_chain);
+			if (error)
+				goto out;
+			/*
+			 * Recalculate the number of required free pages, to
+			 * make sure we never take more than half.
+			 */
+			handle->reqd_free_pages = reqd_free_pages();
+		}
 	}
  out:
 	return error;
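
The comment's "never take more than half" refers to low_free_pages() and reqd_free_pages(), helpers defined earlier in kernel/power/swap.c and not visible in this diff. Assuming the definitions in this version of the file, they amount to the following sketch:

/* Free pages that are not in high memory. */
static inline unsigned long low_free_pages(void)
{
	return nr_free_pages() - nr_free_highpages();
}

/* Keep half of the available low pages free while writing the image. */
static inline unsigned long reqd_free_pages(void)
{
	return low_free_pages() / 2;
}

Whenever the writer has eaten into that reserve, it stalls on the bio chain until in-flight pages come back, then re-derives the threshold from what is actually free.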
@@ -419,8 +427,9 @@ static int swap_writer_finish(struct swap_map_handle *handle,
 /* Maximum number of threads for compression/decompression. */
 #define LZO_THREADS	3
 
-/* Maximum number of pages for read buffering. */
-#define LZO_READ_PAGES	(MAP_PAGE_ENTRIES * 8)
+/* Minimum/maximum number of pages for read buffering. */
+#define LZO_MIN_RD_PAGES	1024
+#define LZO_MAX_RD_PAGES	8192
 
 
 /**
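
Assuming 4 KB pages, the new bounds correspond to 4 MB and 32 MB of read buffer. For comparison, the old fixed cap worked out to MAP_PAGE_ENTRIES * 8 = 511 * 8 = 4088 pages (about 16 MB) with a 64-bit sector_t, since MAP_PAGE_ENTRIES is PAGE_SIZE / sizeof(sector_t) - 1. The new scheme can therefore buffer roughly twice as much when memory is plentiful, and falls back to about a quarter of the old cap when it is not.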
@@ -631,12 +640,6 @@ static int save_image_lzo(struct swap_map_handle *handle,
 	}
 
 	/*
-	 * Adjust number of free pages after all allocations have been done.
-	 * We don't want to run out of pages when writing.
-	 */
-	handle->reqd_free_pages = reqd_free_pages();
-
-	/*
 	 * Start the CRC32 thread.
 	 */
 	init_waitqueue_head(&crc->go);
@@ -657,6 +660,12 @@ static int save_image_lzo(struct swap_map_handle *handle,
 		goto out_clean;
 	}
 
+	/*
+	 * Adjust the number of required free pages after all allocations have
+	 * been done. We don't want to run out of pages when writing.
+	 */
+	handle->reqd_free_pages = reqd_free_pages();
+
 	printk(KERN_INFO
 		"PM: Using %u thread(s) for compression.\n"
 		"PM: Compressing and saving image data (%u pages) ... ",
@@ -1067,7 +1076,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
 	unsigned i, thr, run_threads, nr_threads;
 	unsigned ring = 0, pg = 0, ring_size = 0,
 		 have = 0, want, need, asked = 0;
-	unsigned long read_pages;
+	unsigned long read_pages = 0;
 	unsigned char **page = NULL;
 	struct dec_data *data = NULL;
 	struct crc_data *crc = NULL;
@@ -1079,7 +1088,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
 	nr_threads = num_online_cpus() - 1;
 	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
 
-	page = vmalloc(sizeof(*page) * LZO_READ_PAGES);
+	page = vmalloc(sizeof(*page) * LZO_MAX_RD_PAGES);
 	if (!page) {
 		printk(KERN_ERR "PM: Failed to allocate LZO page\n");
 		ret = -ENOMEM;
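
The pointer array is now always sized for the worst case, since the actual read_pages value is only computed later (see the hunk below). The overhead is trivial: assuming 64-bit pointers, sizeof(*page) * LZO_MAX_RD_PAGES is 8 * 8192 bytes, i.e. 64 KB of vmalloc space, however many buffer pages end up in use.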
@@ -1144,15 +1153,22 @@ static int load_image_lzo(struct swap_map_handle *handle,
 	}
 
 	/*
-	 * Adjust number of pages for read buffering, in case we are short.
+	 * Set the number of pages for read buffering.
+	 * This is complete guesswork, because we'll only know the real
+	 * picture once prepare_image() is called, which is much later on
+	 * during the image load phase. We'll assume the worst case and
+	 * say that none of the image pages are from high memory.
 	 */
-	read_pages = (nr_free_pages() - snapshot_get_image_size()) >> 1;
-	read_pages = clamp_val(read_pages, LZO_CMP_PAGES, LZO_READ_PAGES);
+	if (low_free_pages() > snapshot_get_image_size())
+		read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
+	read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);
 
 	for (i = 0; i < read_pages; i++) {
 		page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
 						  __GFP_WAIT | __GFP_HIGH :
-						  __GFP_WAIT);
+						  __GFP_WAIT | __GFP_NOWARN |
+						  __GFP_NORETRY);
+
 		if (!page[i]) {
 			if (i < LZO_CMP_PAGES) {
 				ring_size = i;
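
Two details here are easy to miss. First, the guard around the subtraction: these functions return unsigned longs, so the old expression nr_free_pages() - snapshot_get_image_size() could wrap around to a huge value when free memory was scarce; with read_pages initialised to 0 (see the earlier hunk) and the subtraction guarded, clamp_val() now lifts a short-memory system to LZO_MIN_RD_PAGES instead. Second, a worked example of the new sizing, with illustrative numbers only:

	/* Illustrative values: 100000 low free pages, an 80000-page image. */
	if (100000 > 80000)
		read_pages = (100000 - 80000) / 2;	/* 10000 */
	read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES,
			       LZO_MAX_RD_PAGES);	/* clamped to 8192 */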