Diffstat (limited to 'kernel/power/swap.c')
-rw-r--r--   kernel/power/swap.c   84
1 file changed, 58 insertions(+), 26 deletions(-)
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 8742fd013a94..11e22c068e8b 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -6,7 +6,7 @@
  *
  * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
  * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
- * Copyright (C) 2010 Bojan Smojver <bojan@rexursive.com>
+ * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
  *
  * This file is released under the GPLv2.
  *
@@ -51,6 +51,23 @@
 
 #define MAP_PAGE_ENTRIES        (PAGE_SIZE / sizeof(sector_t) - 1)
 
+/*
+ * Number of free pages that are not high.
+ */
+static inline unsigned long low_free_pages(void)
+{
+        return nr_free_pages() - nr_free_highpages();
+}
+
+/*
+ * Number of pages required to be kept free while writing the image. Always
+ * half of all available low pages before the writing starts.
+ */
+static inline unsigned long reqd_free_pages(void)
+{
+        return low_free_pages() / 2;
+}
+
 struct swap_map_page {
         sector_t entries[MAP_PAGE_ENTRIES];
         sector_t next_swap;
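The two helpers added in this hunk carry the patch's whole memory policy: only low-memory pages can serve as bounce buffers for the image, and at most half of the low pages that are free may be consumed while writing. A minimal user-space sketch of that arithmetic, where the fake_* counters are hypothetical stand-ins for the kernel's nr_free_pages() and nr_free_highpages():

    #include <stdio.h>

    /* Hypothetical stand-ins for the kernel's global page counters. */
    static unsigned long fake_nr_free_pages(void)     { return 100000; }
    static unsigned long fake_nr_free_highpages(void) { return 30000; }

    /* Free pages usable for I/O bounce buffers: everything not in highmem. */
    static unsigned long low_free_pages(void)
    {
            return fake_nr_free_pages() - fake_nr_free_highpages();
    }

    /* Floor to preserve while writing: half of the free low pages. */
    static unsigned long reqd_free_pages(void)
    {
            return low_free_pages() / 2;
    }

    int main(void)
    {
            printf("low free: %lu, keep free: %lu\n",
                   low_free_pages(), reqd_free_pages());
            return 0;
    }

With the numbers above this prints "low free: 70000, keep free: 35000"; in the kernel the same ratio is recomputed against live counters.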
@@ -72,7 +89,7 @@ struct swap_map_handle {
         sector_t cur_swap;
         sector_t first_sector;
         unsigned int k;
-        unsigned long nr_free_pages, written;
+        unsigned long reqd_free_pages;
         u32 crc32;
 };
 
@@ -265,14 +282,17 @@ static int write_page(void *buf, sector_t offset, struct bio **bio_chain)
                 return -ENOSPC;
 
         if (bio_chain) {
-                src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
+                src = (void *)__get_free_page(__GFP_WAIT | __GFP_NOWARN |
+                                              __GFP_NORETRY);
                 if (src) {
                         copy_page(src, buf);
                 } else {
                         ret = hib_wait_on_bio_chain(bio_chain); /* Free pages */
                         if (ret)
                                 return ret;
-                        src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
+                        src = (void *)__get_free_page(__GFP_WAIT |
+                                                      __GFP_NOWARN |
+                                                      __GFP_NORETRY);
                         if (src) {
                                 copy_page(src, buf);
                         } else {
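Note the flag change in this hunk: __GFP_HIGH (which may dip into the emergency reserves) is dropped in favour of __GFP_NOWARN | __GFP_NORETRY, so the bounce-page allocation fails fast and quietly instead of pressuring the reserves. The failure path then waits on the bio chain, which completes queued writes and frees their pages, before retrying once. A sketch of that try/drain/retry shape, where try_alloc() and drain_io() are hypothetical stand-ins for the cheap __get_free_page() call and hib_wait_on_bio_chain():

    #include <stdlib.h>

    /* Hypothetical stand-ins: a cheap allocation that may fail under
     * pressure, and a drain that completes queued I/O, freeing buffers. */
    static void *try_alloc(void) { return malloc(4096); }
    static int drain_io(void)    { return 0; }

    /* Try cheaply first; on failure, drain in-flight I/O and retry once.
     * Returns NULL (with *err set) only if memory is still unavailable. */
    static void *alloc_bounce(int *err)
    {
            void *p = try_alloc();

            *err = 0;
            if (!p) {
                    *err = drain_io();
                    if (*err)
                            return NULL;
                    p = try_alloc();
            }
            return p;
    }

    int main(void)
    {
            int err;
            void *p = alloc_bounce(&err);

            free(p);
            return err;
    }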
@@ -316,8 +336,7 @@ static int get_swap_writer(struct swap_map_handle *handle)
                 goto err_rel;
         }
         handle->k = 0;
-        handle->nr_free_pages = nr_free_pages() >> 1;
-        handle->written = 0;
+        handle->reqd_free_pages = reqd_free_pages();
         handle->first_sector = handle->cur_swap;
         return 0;
 err_rel:
@@ -351,12 +370,17 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf,
                 clear_page(handle->cur);
                 handle->cur_swap = offset;
                 handle->k = 0;
-        }
-        if (bio_chain && ++handle->written > handle->nr_free_pages) {
-                error = hib_wait_on_bio_chain(bio_chain);
-                if (error)
-                        goto out;
-                handle->written = 0;
+
+                if (bio_chain && low_free_pages() <= handle->reqd_free_pages) {
+                        error = hib_wait_on_bio_chain(bio_chain);
+                        if (error)
+                                goto out;
+                        /*
+                         * Recalculate the number of required free pages, to
+                         * make sure we never take more than half.
+                         */
+                        handle->reqd_free_pages = reqd_free_pages();
+                }
         }
 out:
         return error;
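The rewritten throttle no longer counts pages written; it watches the actual number of free low pages and, whenever that drops to the reserved floor, waits for all queued bios and recomputes the floor from what is free now. Condensed into a self-contained sketch, with queue_write() and drain_io() as hypothetical stand-ins for the asynchronous write_page() call and hib_wait_on_bio_chain():

    /* Hypothetical stand-ins for the kernel primitives. */
    static unsigned long low_free_pages(void)  { return 50000; }
    static unsigned long reqd_free_pages(void) { return low_free_pages() / 2; }
    static int queue_write(void *page)         { (void)page; return 0; }
    static int drain_io(void)                  { return 0; }

    struct writer {
            unsigned long reqd_free_pages;  /* current low-memory floor */
    };

    /* Queue one page asynchronously; if free low memory has fallen to the
     * floor, block until queued I/O completes, then take half of whatever
     * is free now as the new floor, so at most half is ever consumed. */
    static int write_throttled(struct writer *w, void *page)
    {
            int error = queue_write(page);

            if (error)
                    return error;
            if (low_free_pages() <= w->reqd_free_pages) {
                    error = drain_io();
                    if (error)
                            return error;
                    w->reqd_free_pages = reqd_free_pages();
            }
            return 0;
    }

Recomputing the floor after each drain is what makes the "never take more than half" property hold even as other activity changes the amount of free memory.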
@@ -403,8 +427,9 @@ static int swap_writer_finish(struct swap_map_handle *handle,
 /* Maximum number of threads for compression/decompression. */
 #define LZO_THREADS     3
 
-/* Maximum number of pages for read buffering. */
-#define LZO_READ_PAGES  (MAP_PAGE_ENTRIES * 8)
+/* Minimum/maximum number of pages for read buffering. */
+#define LZO_MIN_RD_PAGES        1024
+#define LZO_MAX_RD_PAGES        8192
 
 
 /**
@@ -615,12 +640,6 @@ static int save_image_lzo(struct swap_map_handle *handle,
         }
 
         /*
-         * Adjust number of free pages after all allocations have been done.
-         * We don't want to run out of pages when writing.
-         */
-        handle->nr_free_pages = nr_free_pages() >> 1;
-
-        /*
          * Start the CRC32 thread.
          */
         init_waitqueue_head(&crc->go);
@@ -641,6 +660,12 @@ static int save_image_lzo(struct swap_map_handle *handle,
                 goto out_clean;
         }
 
+        /*
+         * Adjust the number of required free pages after all allocations have
+         * been done. We don't want to run out of pages when writing.
+         */
+        handle->reqd_free_pages = reqd_free_pages();
+
         printk(KERN_INFO
                 "PM: Using %u thread(s) for compression.\n"
                 "PM: Compressing and saving image data (%u pages) ... ",
@@ -1051,7 +1076,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
         unsigned i, thr, run_threads, nr_threads;
         unsigned ring = 0, pg = 0, ring_size = 0,
                  have = 0, want, need, asked = 0;
-        unsigned long read_pages;
+        unsigned long read_pages = 0;
         unsigned char **page = NULL;
         struct dec_data *data = NULL;
         struct crc_data *crc = NULL;
@@ -1063,7 +1088,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
         nr_threads = num_online_cpus() - 1;
         nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
 
-        page = vmalloc(sizeof(*page) * LZO_READ_PAGES);
+        page = vmalloc(sizeof(*page) * LZO_MAX_RD_PAGES);
         if (!page) {
                 printk(KERN_ERR "PM: Failed to allocate LZO page\n");
                 ret = -ENOMEM;
@@ -1128,15 +1153,22 @@
         }
 
         /*
-         * Adjust number of pages for read buffering, in case we are short.
+         * Set the number of pages for read buffering.
+         * This is complete guesswork, because we'll only know the real
+         * picture once prepare_image() is called, which is much later on
+         * during the image load phase. We'll assume the worst case and
+         * say that none of the image pages are from high memory.
          */
-        read_pages = (nr_free_pages() - snapshot_get_image_size()) >> 1;
-        read_pages = clamp_val(read_pages, LZO_CMP_PAGES, LZO_READ_PAGES);
+        if (low_free_pages() > snapshot_get_image_size())
+                read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
+        read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);
 
         for (i = 0; i < read_pages; i++) {
                 page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
                                                   __GFP_WAIT | __GFP_HIGH :
-                                                  __GFP_WAIT);
+                                                  __GFP_WAIT | __GFP_NOWARN |
+                                                  __GFP_NORETRY);
+
                 if (!page[i]) {
                         if (i < LZO_CMP_PAGES) {
                                 ring_size = i;
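The new sizing logic in this hunk is easy to check in isolation: assume the worst case where every image page must land in low memory, take half of whatever low memory would remain after restoring the image, and clamp the result into [LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES]. A stand-alone sketch, with the two counters passed in as plain numbers instead of the kernel's live low_free_pages() and snapshot_get_image_size():

    #include <stdio.h>

    #define LZO_MIN_RD_PAGES        1024
    #define LZO_MAX_RD_PAGES        8192

    static unsigned long clamp_ul(unsigned long v, unsigned long lo,
                                  unsigned long hi)
    {
            return v < lo ? lo : (v > hi ? hi : v);
    }

    /* Worst-case estimate: every image page consumes low memory; buffer
     * with half of the remainder, bounded by the min/max read pages. */
    static unsigned long size_read_buffer(unsigned long low_free,
                                          unsigned long image_pages)
    {
            unsigned long read_pages = 0;

            if (low_free > image_pages)
                    read_pages = (low_free - image_pages) / 2;
            return clamp_ul(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);
    }

    int main(void)
    {
            /* Example: 200000 low pages free, 150000-page image:
             * (200000 - 150000) / 2 = 25000, clamped down to 8192. */
            printf("%lu read-buffer pages\n", size_read_buffer(200000, 150000));
            return 0;
    }

The lower bound also explains the initializer added earlier in this diff: read_pages starts at 0 so that when the image alone would exhaust low memory, clamp_val() still yields LZO_MIN_RD_PAGES rather than reading an uninitialized value.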