aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/power
diff options
context:
space:
mode:
authorBojan Smojver <bojan@rexursive.com>2011-10-13 17:58:07 -0400
committerRafael J. Wysocki <rjw@sisk.pl>2011-10-16 17:30:38 -0400
commit081a9d043c983f161b78fdc4671324d1342b86bc (patch)
tree4382d88e5705b18a95471af2a71319401c47744f /kernel/power
parentd231ff1af70a2df43d809173cf8c94e9c3beb853 (diff)
PM / Hibernate: Improve performance of LZO/plain hibernation, checksum image
Use threads for LZO compression/decompression on hibernate/thaw. Improve buffering on hibernate/thaw. Calculate/verify CRC32 of the image pages on hibernate/thaw. In my testing, this improved write/read speed by a factor of about two. Signed-off-by: Bojan Smojver <bojan@rexursive.com> Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Diffstat (limited to 'kernel/power')
-rw-r--r--kernel/power/Kconfig1
-rw-r--r--kernel/power/hibernate.c3
-rw-r--r--kernel/power/power.h1
-rw-r--r--kernel/power/swap.c818
4 files changed, 645 insertions, 178 deletions
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index e01e6899592c..cedd9982306a 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -27,6 +27,7 @@ config HIBERNATION
27 select HIBERNATE_CALLBACKS 27 select HIBERNATE_CALLBACKS
28 select LZO_COMPRESS 28 select LZO_COMPRESS
29 select LZO_DECOMPRESS 29 select LZO_DECOMPRESS
30 select CRC32
30 ---help--- 31 ---help---
31 Enable the suspend to disk (STD) functionality, which is usually 32 Enable the suspend to disk (STD) functionality, which is usually
32 called "hibernation" in user interfaces. STD checkpoints the 33 called "hibernation" in user interfaces. STD checkpoints the
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index ea12c8f1bdfd..1c53f7fad5f7 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -657,6 +657,9 @@ int hibernate(void)
657 flags |= SF_PLATFORM_MODE; 657 flags |= SF_PLATFORM_MODE;
658 if (nocompress) 658 if (nocompress)
659 flags |= SF_NOCOMPRESS_MODE; 659 flags |= SF_NOCOMPRESS_MODE;
660 else
661 flags |= SF_CRC32_MODE;
662
660 pr_debug("PM: writing image.\n"); 663 pr_debug("PM: writing image.\n");
661 error = swsusp_write(flags); 664 error = swsusp_write(flags);
662 swsusp_free(); 665 swsusp_free();
diff --git a/kernel/power/power.h b/kernel/power/power.h
index e6206397ce67..23a2db1ec442 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -146,6 +146,7 @@ extern int swsusp_swap_in_use(void);
146 */ 146 */
147#define SF_PLATFORM_MODE 1 147#define SF_PLATFORM_MODE 1
148#define SF_NOCOMPRESS_MODE 2 148#define SF_NOCOMPRESS_MODE 2
149#define SF_CRC32_MODE 4
149 150
150/* kernel/power/hibernate.c */ 151/* kernel/power/hibernate.c */
151extern int swsusp_check(void); 152extern int swsusp_check(void);
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 7c97c3a0eee3..11a594c4ba25 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -27,6 +27,10 @@
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/lzo.h> 28#include <linux/lzo.h>
29#include <linux/vmalloc.h> 29#include <linux/vmalloc.h>
30#include <linux/cpumask.h>
31#include <linux/atomic.h>
32#include <linux/kthread.h>
33#include <linux/crc32.h>
30 34
31#include "power.h" 35#include "power.h"
32 36
@@ -43,8 +47,7 @@
43 * allocated and populated one at a time, so we only need one memory 47 * allocated and populated one at a time, so we only need one memory
44 * page to set up the entire structure. 48 * page to set up the entire structure.
45 * 49 *
46 * During resume we also only need to use one swap_map_page structure 50 * During resume we pick up all swap_map_page structures into a list.
47 * at a time.
48 */ 51 */
49 52
50#define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1) 53#define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1)
@@ -54,6 +57,11 @@ struct swap_map_page {
54 sector_t next_swap; 57 sector_t next_swap;
55}; 58};
56 59
60struct swap_map_page_list {
61 struct swap_map_page *map;
62 struct swap_map_page_list *next;
63};
64
57/** 65/**
58 * The swap_map_handle structure is used for handling swap in 66 * The swap_map_handle structure is used for handling swap in
59 * a file-alike way 67 * a file-alike way
@@ -61,13 +69,18 @@ struct swap_map_page {
61 69
62struct swap_map_handle { 70struct swap_map_handle {
63 struct swap_map_page *cur; 71 struct swap_map_page *cur;
72 struct swap_map_page_list *maps;
64 sector_t cur_swap; 73 sector_t cur_swap;
65 sector_t first_sector; 74 sector_t first_sector;
66 unsigned int k; 75 unsigned int k;
76 unsigned long nr_free_pages, written;
77 u32 crc32;
67}; 78};
68 79
69struct swsusp_header { 80struct swsusp_header {
70 char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int)]; 81 char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
82 sizeof(u32)];
83 u32 crc32;
71 sector_t image; 84 sector_t image;
72 unsigned int flags; /* Flags to pass to the "boot" kernel */ 85 unsigned int flags; /* Flags to pass to the "boot" kernel */
73 char orig_sig[10]; 86 char orig_sig[10];
@@ -199,6 +212,8 @@ static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
199 memcpy(swsusp_header->sig, HIBERNATE_SIG, 10); 212 memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
200 swsusp_header->image = handle->first_sector; 213 swsusp_header->image = handle->first_sector;
201 swsusp_header->flags = flags; 214 swsusp_header->flags = flags;
215 if (flags & SF_CRC32_MODE)
216 swsusp_header->crc32 = handle->crc32;
202 error = hib_bio_write_page(swsusp_resume_block, 217 error = hib_bio_write_page(swsusp_resume_block,
203 swsusp_header, NULL); 218 swsusp_header, NULL);
204 } else { 219 } else {
@@ -245,6 +260,7 @@ static int swsusp_swap_check(void)
245static int write_page(void *buf, sector_t offset, struct bio **bio_chain) 260static int write_page(void *buf, sector_t offset, struct bio **bio_chain)
246{ 261{
247 void *src; 262 void *src;
263 int ret;
248 264
249 if (!offset) 265 if (!offset)
250 return -ENOSPC; 266 return -ENOSPC;
@@ -254,9 +270,17 @@ static int write_page(void *buf, sector_t offset, struct bio **bio_chain)
254 if (src) { 270 if (src) {
255 copy_page(src, buf); 271 copy_page(src, buf);
256 } else { 272 } else {
257 WARN_ON_ONCE(1); 273 ret = hib_wait_on_bio_chain(bio_chain); /* Free pages */
258 bio_chain = NULL; /* Go synchronous */ 274 if (ret)
259 src = buf; 275 return ret;
276 src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
277 if (src) {
278 copy_page(src, buf);
279 } else {
280 WARN_ON_ONCE(1);
281 bio_chain = NULL; /* Go synchronous */
282 src = buf;
283 }
260 } 284 }
261 } else { 285 } else {
262 src = buf; 286 src = buf;
@@ -293,6 +317,8 @@ static int get_swap_writer(struct swap_map_handle *handle)
293 goto err_rel; 317 goto err_rel;
294 } 318 }
295 handle->k = 0; 319 handle->k = 0;
320 handle->nr_free_pages = nr_free_pages() >> 1;
321 handle->written = 0;
296 handle->first_sector = handle->cur_swap; 322 handle->first_sector = handle->cur_swap;
297 return 0; 323 return 0;
298err_rel: 324err_rel:
@@ -316,20 +342,23 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf,
316 return error; 342 return error;
317 handle->cur->entries[handle->k++] = offset; 343 handle->cur->entries[handle->k++] = offset;
318 if (handle->k >= MAP_PAGE_ENTRIES) { 344 if (handle->k >= MAP_PAGE_ENTRIES) {
319 error = hib_wait_on_bio_chain(bio_chain);
320 if (error)
321 goto out;
322 offset = alloc_swapdev_block(root_swap); 345 offset = alloc_swapdev_block(root_swap);
323 if (!offset) 346 if (!offset)
324 return -ENOSPC; 347 return -ENOSPC;
325 handle->cur->next_swap = offset; 348 handle->cur->next_swap = offset;
326 error = write_page(handle->cur, handle->cur_swap, NULL); 349 error = write_page(handle->cur, handle->cur_swap, bio_chain);
327 if (error) 350 if (error)
328 goto out; 351 goto out;
329 clear_page(handle->cur); 352 clear_page(handle->cur);
330 handle->cur_swap = offset; 353 handle->cur_swap = offset;
331 handle->k = 0; 354 handle->k = 0;
332 } 355 }
356 if (bio_chain && ++handle->written > handle->nr_free_pages) {
357 error = hib_wait_on_bio_chain(bio_chain);
358 if (error)
359 goto out;
360 handle->written = 0;
361 }
333 out: 362 out:
334 return error; 363 return error;
335} 364}
@@ -372,6 +401,13 @@ static int swap_writer_finish(struct swap_map_handle *handle,
372 LZO_HEADER, PAGE_SIZE) 401 LZO_HEADER, PAGE_SIZE)
373#define LZO_CMP_SIZE (LZO_CMP_PAGES * PAGE_SIZE) 402#define LZO_CMP_SIZE (LZO_CMP_PAGES * PAGE_SIZE)
374 403
404/* Maximum number of threads for compression/decompression. */
405#define LZO_THREADS 3
406
407/* Maximum number of pages for read buffering. */
408#define LZO_READ_PAGES (MAP_PAGE_ENTRIES * 8)
409
410
375/** 411/**
376 * save_image - save the suspend image data 412 * save_image - save the suspend image data
377 */ 413 */
@@ -419,6 +455,92 @@ static int save_image(struct swap_map_handle *handle,
419 return ret; 455 return ret;
420} 456}
421 457
458/**
459 * Structure used for CRC32.
460 */
461struct crc_data {
462 struct task_struct *thr; /* thread */
463 atomic_t ready; /* ready to start flag */
464 atomic_t stop; /* ready to stop flag */
465 unsigned run_threads; /* nr current threads */
466 wait_queue_head_t go; /* start crc update */
467 wait_queue_head_t done; /* crc update done */
468 u32 *crc32; /* points to handle's crc32 */
469 size_t *unc_len[LZO_THREADS]; /* uncompressed lengths */
470 unsigned char *unc[LZO_THREADS]; /* uncompressed data */
471};
472
473/**
474 * CRC32 update function that runs in its own thread.
475 */
476static int crc32_threadfn(void *data)
477{
478 struct crc_data *d = data;
479 unsigned i;
480
481 while (1) {
482 wait_event(d->go, atomic_read(&d->ready) ||
483 kthread_should_stop());
484 if (kthread_should_stop()) {
485 d->thr = NULL;
486 atomic_set(&d->stop, 1);
487 wake_up(&d->done);
488 break;
489 }
490 atomic_set(&d->ready, 0);
491
492 for (i = 0; i < d->run_threads; i++)
493 *d->crc32 = crc32_le(*d->crc32,
494 d->unc[i], *d->unc_len[i]);
495 atomic_set(&d->stop, 1);
496 wake_up(&d->done);
497 }
498 return 0;
499}
500/**
501 * Structure used for LZO data compression.
502 */
503struct cmp_data {
504 struct task_struct *thr; /* thread */
505 atomic_t ready; /* ready to start flag */
506 atomic_t stop; /* ready to stop flag */
507 int ret; /* return code */
508 wait_queue_head_t go; /* start compression */
509 wait_queue_head_t done; /* compression done */
510 size_t unc_len; /* uncompressed length */
511 size_t cmp_len; /* compressed length */
512 unsigned char unc[LZO_UNC_SIZE]; /* uncompressed buffer */
513 unsigned char cmp[LZO_CMP_SIZE]; /* compressed buffer */
514 unsigned char wrk[LZO1X_1_MEM_COMPRESS]; /* compression workspace */
515};
516
517/**
518 * Compression function that runs in its own thread.
519 */
520static int lzo_compress_threadfn(void *data)
521{
522 struct cmp_data *d = data;
523
524 while (1) {
525 wait_event(d->go, atomic_read(&d->ready) ||
526 kthread_should_stop());
527 if (kthread_should_stop()) {
528 d->thr = NULL;
529 d->ret = -1;
530 atomic_set(&d->stop, 1);
531 wake_up(&d->done);
532 break;
533 }
534 atomic_set(&d->ready, 0);
535
536 d->ret = lzo1x_1_compress(d->unc, d->unc_len,
537 d->cmp + LZO_HEADER, &d->cmp_len,
538 d->wrk);
539 atomic_set(&d->stop, 1);
540 wake_up(&d->done);
541 }
542 return 0;
543}
422 544
423/** 545/**
424 * save_image_lzo - Save the suspend image data compressed with LZO. 546 * save_image_lzo - Save the suspend image data compressed with LZO.
@@ -437,42 +559,93 @@ static int save_image_lzo(struct swap_map_handle *handle,
437 struct bio *bio; 559 struct bio *bio;
438 struct timeval start; 560 struct timeval start;
439 struct timeval stop; 561 struct timeval stop;
440 size_t off, unc_len, cmp_len; 562 size_t off;
441 unsigned char *unc, *cmp, *wrk, *page; 563 unsigned thr, run_threads, nr_threads;
564 unsigned char *page = NULL;
565 struct cmp_data *data = NULL;
566 struct crc_data *crc = NULL;
567
568 /*
569 * We'll limit the number of threads for compression to limit memory
570 * footprint.
571 */
572 nr_threads = num_online_cpus() - 1;
573 nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
442 574
443 page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH); 575 page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
444 if (!page) { 576 if (!page) {
445 printk(KERN_ERR "PM: Failed to allocate LZO page\n"); 577 printk(KERN_ERR "PM: Failed to allocate LZO page\n");
446 return -ENOMEM; 578 ret = -ENOMEM;
579 goto out_clean;
447 } 580 }
448 581
449 wrk = vmalloc(LZO1X_1_MEM_COMPRESS); 582 data = vmalloc(sizeof(*data) * nr_threads);
450 if (!wrk) { 583 if (!data) {
451 printk(KERN_ERR "PM: Failed to allocate LZO workspace\n"); 584 printk(KERN_ERR "PM: Failed to allocate LZO data\n");
452 free_page((unsigned long)page); 585 ret = -ENOMEM;
453 return -ENOMEM; 586 goto out_clean;
454 } 587 }
588 for (thr = 0; thr < nr_threads; thr++)
589 memset(&data[thr], 0, offsetof(struct cmp_data, go));
455 590
456 unc = vmalloc(LZO_UNC_SIZE); 591 crc = kmalloc(sizeof(*crc), GFP_KERNEL);
457 if (!unc) { 592 if (!crc) {
458 printk(KERN_ERR "PM: Failed to allocate LZO uncompressed\n"); 593 printk(KERN_ERR "PM: Failed to allocate crc\n");
459 vfree(wrk); 594 ret = -ENOMEM;
460 free_page((unsigned long)page); 595 goto out_clean;
461 return -ENOMEM; 596 }
597 memset(crc, 0, offsetof(struct crc_data, go));
598
599 /*
600 * Start the compression threads.
601 */
602 for (thr = 0; thr < nr_threads; thr++) {
603 init_waitqueue_head(&data[thr].go);
604 init_waitqueue_head(&data[thr].done);
605
606 data[thr].thr = kthread_run(lzo_compress_threadfn,
607 &data[thr],
608 "image_compress/%u", thr);
609 if (IS_ERR(data[thr].thr)) {
610 data[thr].thr = NULL;
611 printk(KERN_ERR
612 "PM: Cannot start compression threads\n");
613 ret = -ENOMEM;
614 goto out_clean;
615 }
462 } 616 }
463 617
464 cmp = vmalloc(LZO_CMP_SIZE); 618 /*
465 if (!cmp) { 619 * Adjust number of free pages after all allocations have been done.
466 printk(KERN_ERR "PM: Failed to allocate LZO compressed\n"); 620 * We don't want to run out of pages when writing.
467 vfree(unc); 621 */
468 vfree(wrk); 622 handle->nr_free_pages = nr_free_pages() >> 1;
469 free_page((unsigned long)page); 623
470 return -ENOMEM; 624 /*
625 * Start the CRC32 thread.
626 */
627 init_waitqueue_head(&crc->go);
628 init_waitqueue_head(&crc->done);
629
630 handle->crc32 = 0;
631 crc->crc32 = &handle->crc32;
632 for (thr = 0; thr < nr_threads; thr++) {
633 crc->unc[thr] = data[thr].unc;
634 crc->unc_len[thr] = &data[thr].unc_len;
635 }
636
637 crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
638 if (IS_ERR(crc->thr)) {
639 crc->thr = NULL;
640 printk(KERN_ERR "PM: Cannot start CRC32 thread\n");
641 ret = -ENOMEM;
642 goto out_clean;
471 } 643 }
472 644
473 printk(KERN_INFO 645 printk(KERN_INFO
646 "PM: Using %u thread(s) for compression.\n"
474 "PM: Compressing and saving image data (%u pages) ... ", 647 "PM: Compressing and saving image data (%u pages) ... ",
475 nr_to_write); 648 nr_threads, nr_to_write);
476 m = nr_to_write / 100; 649 m = nr_to_write / 100;
477 if (!m) 650 if (!m)
478 m = 1; 651 m = 1;
@@ -480,55 +653,83 @@ static int save_image_lzo(struct swap_map_handle *handle,
480 bio = NULL; 653 bio = NULL;
481 do_gettimeofday(&start); 654 do_gettimeofday(&start);
482 for (;;) { 655 for (;;) {
483 for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) { 656 for (thr = 0; thr < nr_threads; thr++) {
484 ret = snapshot_read_next(snapshot); 657 for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
485 if (ret < 0) 658 ret = snapshot_read_next(snapshot);
486 goto out_finish; 659 if (ret < 0)
487 660 goto out_finish;
488 if (!ret) 661
662 if (!ret)
663 break;
664
665 memcpy(data[thr].unc + off,
666 data_of(*snapshot), PAGE_SIZE);
667
668 if (!(nr_pages % m))
669 printk(KERN_CONT "\b\b\b\b%3d%%",
670 nr_pages / m);
671 nr_pages++;
672 }
673 if (!off)
489 break; 674 break;
490 675
491 memcpy(unc + off, data_of(*snapshot), PAGE_SIZE); 676 data[thr].unc_len = off;
492 677
493 if (!(nr_pages % m)) 678 atomic_set(&data[thr].ready, 1);
494 printk(KERN_CONT "\b\b\b\b%3d%%", nr_pages / m); 679 wake_up(&data[thr].go);
495 nr_pages++;
496 } 680 }
497 681
498 if (!off) 682 if (!thr)
499 break; 683 break;
500 684
501 unc_len = off; 685 crc->run_threads = thr;
502 ret = lzo1x_1_compress(unc, unc_len, 686 atomic_set(&crc->ready, 1);
503 cmp + LZO_HEADER, &cmp_len, wrk); 687 wake_up(&crc->go);
504 if (ret < 0) {
505 printk(KERN_ERR "PM: LZO compression failed\n");
506 break;
507 }
508 688
509 if (unlikely(!cmp_len || 689 for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
510 cmp_len > lzo1x_worst_compress(unc_len))) { 690 wait_event(data[thr].done,
511 printk(KERN_ERR "PM: Invalid LZO compressed length\n"); 691 atomic_read(&data[thr].stop));
512 ret = -1; 692 atomic_set(&data[thr].stop, 0);
513 break;
514 }
515 693
516 *(size_t *)cmp = cmp_len; 694 ret = data[thr].ret;
517 695
518 /* 696 if (ret < 0) {
519 * Given we are writing one page at a time to disk, we copy 697 printk(KERN_ERR "PM: LZO compression failed\n");
520 * that much from the buffer, although the last bit will likely 698 goto out_finish;
521 * be smaller than full page. This is OK - we saved the length 699 }
522 * of the compressed data, so any garbage at the end will be
523 * discarded when we read it.
524 */
525 for (off = 0; off < LZO_HEADER + cmp_len; off += PAGE_SIZE) {
526 memcpy(page, cmp + off, PAGE_SIZE);
527 700
528 ret = swap_write_page(handle, page, &bio); 701 if (unlikely(!data[thr].cmp_len ||
529 if (ret) 702 data[thr].cmp_len >
703 lzo1x_worst_compress(data[thr].unc_len))) {
704 printk(KERN_ERR
705 "PM: Invalid LZO compressed length\n");
706 ret = -1;
530 goto out_finish; 707 goto out_finish;
708 }
709
710 *(size_t *)data[thr].cmp = data[thr].cmp_len;
711
712 /*
713 * Given we are writing one page at a time to disk, we
714 * copy that much from the buffer, although the last
715 * bit will likely be smaller than full page. This is
716 * OK - we saved the length of the compressed data, so
717 * any garbage at the end will be discarded when we
718 * read it.
719 */
720 for (off = 0;
721 off < LZO_HEADER + data[thr].cmp_len;
722 off += PAGE_SIZE) {
723 memcpy(page, data[thr].cmp + off, PAGE_SIZE);
724
725 ret = swap_write_page(handle, page, &bio);
726 if (ret)
727 goto out_finish;
728 }
531 } 729 }
730
731 wait_event(crc->done, atomic_read(&crc->stop));
732 atomic_set(&crc->stop, 0);
532 } 733 }
533 734
534out_finish: 735out_finish:
@@ -536,16 +737,25 @@ out_finish:
536 do_gettimeofday(&stop); 737 do_gettimeofday(&stop);
537 if (!ret) 738 if (!ret)
538 ret = err2; 739 ret = err2;
539 if (!ret) 740 if (!ret) {
540 printk(KERN_CONT "\b\b\b\bdone\n"); 741 printk(KERN_CONT "\b\b\b\bdone\n");
541 else 742 } else {
542 printk(KERN_CONT "\n"); 743 printk(KERN_CONT "\n");
744 }
543 swsusp_show_speed(&start, &stop, nr_to_write, "Wrote"); 745 swsusp_show_speed(&start, &stop, nr_to_write, "Wrote");
544 746out_clean:
545 vfree(cmp); 747 if (crc) {
546 vfree(unc); 748 if (crc->thr)
547 vfree(wrk); 749 kthread_stop(crc->thr);
548 free_page((unsigned long)page); 750 kfree(crc);
751 }
752 if (data) {
753 for (thr = 0; thr < nr_threads; thr++)
754 if (data[thr].thr)
755 kthread_stop(data[thr].thr);
756 vfree(data);
757 }
758 if (page) free_page((unsigned long)page);
549 759
550 return ret; 760 return ret;
551} 761}
@@ -625,8 +835,15 @@ out_finish:
625 835
626static void release_swap_reader(struct swap_map_handle *handle) 836static void release_swap_reader(struct swap_map_handle *handle)
627{ 837{
628 if (handle->cur) 838 struct swap_map_page_list *tmp;
629 free_page((unsigned long)handle->cur); 839
840 while (handle->maps) {
841 if (handle->maps->map)
842 free_page((unsigned long)handle->maps->map);
843 tmp = handle->maps;
844 handle->maps = handle->maps->next;
845 kfree(tmp);
846 }
630 handle->cur = NULL; 847 handle->cur = NULL;
631} 848}
632 849
@@ -634,22 +851,46 @@ static int get_swap_reader(struct swap_map_handle *handle,
634 unsigned int *flags_p) 851 unsigned int *flags_p)
635{ 852{
636 int error; 853 int error;
854 struct swap_map_page_list *tmp, *last;
855 sector_t offset;
637 856
638 *flags_p = swsusp_header->flags; 857 *flags_p = swsusp_header->flags;
639 858
640 if (!swsusp_header->image) /* how can this happen? */ 859 if (!swsusp_header->image) /* how can this happen? */
641 return -EINVAL; 860 return -EINVAL;
642 861
643 handle->cur = (struct swap_map_page *)get_zeroed_page(__GFP_WAIT | __GFP_HIGH); 862 handle->cur = NULL;
644 if (!handle->cur) 863 last = handle->maps = NULL;
645 return -ENOMEM; 864 offset = swsusp_header->image;
865 while (offset) {
866 tmp = kmalloc(sizeof(*handle->maps), GFP_KERNEL);
867 if (!tmp) {
868 release_swap_reader(handle);
869 return -ENOMEM;
870 }
871 memset(tmp, 0, sizeof(*tmp));
872 if (!handle->maps)
873 handle->maps = tmp;
874 if (last)
875 last->next = tmp;
876 last = tmp;
877
878 tmp->map = (struct swap_map_page *)
879 __get_free_page(__GFP_WAIT | __GFP_HIGH);
880 if (!tmp->map) {
881 release_swap_reader(handle);
882 return -ENOMEM;
883 }
646 884
647 error = hib_bio_read_page(swsusp_header->image, handle->cur, NULL); 885 error = hib_bio_read_page(offset, tmp->map, NULL);
648 if (error) { 886 if (error) {
649 release_swap_reader(handle); 887 release_swap_reader(handle);
650 return error; 888 return error;
889 }
890 offset = tmp->map->next_swap;
651 } 891 }
652 handle->k = 0; 892 handle->k = 0;
893 handle->cur = handle->maps->map;
653 return 0; 894 return 0;
654} 895}
655 896
@@ -658,6 +899,7 @@ static int swap_read_page(struct swap_map_handle *handle, void *buf,
658{ 899{
659 sector_t offset; 900 sector_t offset;
660 int error; 901 int error;
902 struct swap_map_page_list *tmp;
661 903
662 if (!handle->cur) 904 if (!handle->cur)
663 return -EINVAL; 905 return -EINVAL;
@@ -668,13 +910,15 @@ static int swap_read_page(struct swap_map_handle *handle, void *buf,
668 if (error) 910 if (error)
669 return error; 911 return error;
670 if (++handle->k >= MAP_PAGE_ENTRIES) { 912 if (++handle->k >= MAP_PAGE_ENTRIES) {
671 error = hib_wait_on_bio_chain(bio_chain);
672 handle->k = 0; 913 handle->k = 0;
673 offset = handle->cur->next_swap; 914 free_page((unsigned long)handle->maps->map);
674 if (!offset) 915 tmp = handle->maps;
916 handle->maps = handle->maps->next;
917 kfree(tmp);
918 if (!handle->maps)
675 release_swap_reader(handle); 919 release_swap_reader(handle);
676 else if (!error) 920 else
677 error = hib_bio_read_page(offset, handle->cur, NULL); 921 handle->cur = handle->maps->map;
678 } 922 }
679 return error; 923 return error;
680} 924}
@@ -697,7 +941,7 @@ static int load_image(struct swap_map_handle *handle,
697 unsigned int nr_to_read) 941 unsigned int nr_to_read)
698{ 942{
699 unsigned int m; 943 unsigned int m;
700 int error = 0; 944 int ret = 0;
701 struct timeval start; 945 struct timeval start;
702 struct timeval stop; 946 struct timeval stop;
703 struct bio *bio; 947 struct bio *bio;
@@ -713,15 +957,15 @@ static int load_image(struct swap_map_handle *handle,
713 bio = NULL; 957 bio = NULL;
714 do_gettimeofday(&start); 958 do_gettimeofday(&start);
715 for ( ; ; ) { 959 for ( ; ; ) {
716 error = snapshot_write_next(snapshot); 960 ret = snapshot_write_next(snapshot);
717 if (error <= 0) 961 if (ret <= 0)
718 break; 962 break;
719 error = swap_read_page(handle, data_of(*snapshot), &bio); 963 ret = swap_read_page(handle, data_of(*snapshot), &bio);
720 if (error) 964 if (ret)
721 break; 965 break;
722 if (snapshot->sync_read) 966 if (snapshot->sync_read)
723 error = hib_wait_on_bio_chain(&bio); 967 ret = hib_wait_on_bio_chain(&bio);
724 if (error) 968 if (ret)
725 break; 969 break;
726 if (!(nr_pages % m)) 970 if (!(nr_pages % m))
727 printk("\b\b\b\b%3d%%", nr_pages / m); 971 printk("\b\b\b\b%3d%%", nr_pages / m);
@@ -729,17 +973,61 @@ static int load_image(struct swap_map_handle *handle,
729 } 973 }
730 err2 = hib_wait_on_bio_chain(&bio); 974 err2 = hib_wait_on_bio_chain(&bio);
731 do_gettimeofday(&stop); 975 do_gettimeofday(&stop);
732 if (!error) 976 if (!ret)
733 error = err2; 977 ret = err2;
734 if (!error) { 978 if (!ret) {
735 printk("\b\b\b\bdone\n"); 979 printk("\b\b\b\bdone\n");
736 snapshot_write_finalize(snapshot); 980 snapshot_write_finalize(snapshot);
737 if (!snapshot_image_loaded(snapshot)) 981 if (!snapshot_image_loaded(snapshot))
738 error = -ENODATA; 982 ret = -ENODATA;
739 } else 983 } else
740 printk("\n"); 984 printk("\n");
741 swsusp_show_speed(&start, &stop, nr_to_read, "Read"); 985 swsusp_show_speed(&start, &stop, nr_to_read, "Read");
742 return error; 986 return ret;
987}
988
989/**
990 * Structure used for LZO data decompression.
991 */
992struct dec_data {
993 struct task_struct *thr; /* thread */
994 atomic_t ready; /* ready to start flag */
995 atomic_t stop; /* ready to stop flag */
996 int ret; /* return code */
997 wait_queue_head_t go; /* start decompression */
998 wait_queue_head_t done; /* decompression done */
999 size_t unc_len; /* uncompressed length */
1000 size_t cmp_len; /* compressed length */
1001 unsigned char unc[LZO_UNC_SIZE]; /* uncompressed buffer */
1002 unsigned char cmp[LZO_CMP_SIZE]; /* compressed buffer */
1003};
1004
1005/**
1006 * Decompression function that runs in its own thread.
1007 */
1008static int lzo_decompress_threadfn(void *data)
1009{
1010 struct dec_data *d = data;
1011
1012 while (1) {
1013 wait_event(d->go, atomic_read(&d->ready) ||
1014 kthread_should_stop());
1015 if (kthread_should_stop()) {
1016 d->thr = NULL;
1017 d->ret = -1;
1018 atomic_set(&d->stop, 1);
1019 wake_up(&d->done);
1020 break;
1021 }
1022 atomic_set(&d->ready, 0);
1023
1024 d->unc_len = LZO_UNC_SIZE;
1025 d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
1026 d->unc, &d->unc_len);
1027 atomic_set(&d->stop, 1);
1028 wake_up(&d->done);
1029 }
1030 return 0;
743} 1031}
744 1032
745/** 1033/**
@@ -753,50 +1041,120 @@ static int load_image_lzo(struct swap_map_handle *handle,
753 unsigned int nr_to_read) 1041 unsigned int nr_to_read)
754{ 1042{
755 unsigned int m; 1043 unsigned int m;
756 int error = 0; 1044 int ret = 0;
1045 int eof = 0;
757 struct bio *bio; 1046 struct bio *bio;
758 struct timeval start; 1047 struct timeval start;
759 struct timeval stop; 1048 struct timeval stop;
760 unsigned nr_pages; 1049 unsigned nr_pages;
761 size_t i, off, unc_len, cmp_len; 1050 size_t off;
762 unsigned char *unc, *cmp, *page[LZO_CMP_PAGES]; 1051 unsigned i, thr, run_threads, nr_threads;
763 1052 unsigned ring = 0, pg = 0, ring_size = 0,
764 for (i = 0; i < LZO_CMP_PAGES; i++) { 1053 have = 0, want, need, asked = 0;
765 page[i] = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH); 1054 unsigned long read_pages;
766 if (!page[i]) { 1055 unsigned char **page = NULL;
767 printk(KERN_ERR "PM: Failed to allocate LZO page\n"); 1056 struct dec_data *data = NULL;
1057 struct crc_data *crc = NULL;
1058
1059 /*
1060 * We'll limit the number of threads for decompression to limit memory
1061 * footprint.
1062 */
1063 nr_threads = num_online_cpus() - 1;
1064 nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
1065
1066 page = vmalloc(sizeof(*page) * LZO_READ_PAGES);
1067 if (!page) {
1068 printk(KERN_ERR "PM: Failed to allocate LZO page\n");
1069 ret = -ENOMEM;
1070 goto out_clean;
1071 }
768 1072
769 while (i) 1073 data = vmalloc(sizeof(*data) * nr_threads);
770 free_page((unsigned long)page[--i]); 1074 if (!data) {
1075 printk(KERN_ERR "PM: Failed to allocate LZO data\n");
1076 ret = -ENOMEM;
1077 goto out_clean;
1078 }
1079 for (thr = 0; thr < nr_threads; thr++)
1080 memset(&data[thr], 0, offsetof(struct dec_data, go));
771 1081
772 return -ENOMEM; 1082 crc = kmalloc(sizeof(*crc), GFP_KERNEL);
1083 if (!crc) {
1084 printk(KERN_ERR "PM: Failed to allocate crc\n");
1085 ret = -ENOMEM;
1086 goto out_clean;
1087 }
1088 memset(crc, 0, offsetof(struct crc_data, go));
1089
1090 /*
1091 * Start the decompression threads.
1092 */
1093 for (thr = 0; thr < nr_threads; thr++) {
1094 init_waitqueue_head(&data[thr].go);
1095 init_waitqueue_head(&data[thr].done);
1096
1097 data[thr].thr = kthread_run(lzo_decompress_threadfn,
1098 &data[thr],
1099 "image_decompress/%u", thr);
1100 if (IS_ERR(data[thr].thr)) {
1101 data[thr].thr = NULL;
1102 printk(KERN_ERR
1103 "PM: Cannot start decompression threads\n");
1104 ret = -ENOMEM;
1105 goto out_clean;
773 } 1106 }
774 } 1107 }
775 1108
776 unc = vmalloc(LZO_UNC_SIZE); 1109 /*
777 if (!unc) { 1110 * Start the CRC32 thread.
778 printk(KERN_ERR "PM: Failed to allocate LZO uncompressed\n"); 1111 */
779 1112 init_waitqueue_head(&crc->go);
780 for (i = 0; i < LZO_CMP_PAGES; i++) 1113 init_waitqueue_head(&crc->done);
781 free_page((unsigned long)page[i]); 1114
782 1115 handle->crc32 = 0;
783 return -ENOMEM; 1116 crc->crc32 = &handle->crc32;
1117 for (thr = 0; thr < nr_threads; thr++) {
1118 crc->unc[thr] = data[thr].unc;
1119 crc->unc_len[thr] = &data[thr].unc_len;
784 } 1120 }
785 1121
786 cmp = vmalloc(LZO_CMP_SIZE); 1122 crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
787 if (!cmp) { 1123 if (IS_ERR(crc->thr)) {
788 printk(KERN_ERR "PM: Failed to allocate LZO compressed\n"); 1124 crc->thr = NULL;
1125 printk(KERN_ERR "PM: Cannot start CRC32 thread\n");
1126 ret = -ENOMEM;
1127 goto out_clean;
1128 }
789 1129
790 vfree(unc); 1130 /*
791 for (i = 0; i < LZO_CMP_PAGES; i++) 1131 * Adjust number of pages for read buffering, in case we are short.
792 free_page((unsigned long)page[i]); 1132 */
1133 read_pages = (nr_free_pages() - snapshot_get_image_size()) >> 1;
1134 read_pages = clamp_val(read_pages, LZO_CMP_PAGES, LZO_READ_PAGES);
793 1135
794 return -ENOMEM; 1136 for (i = 0; i < read_pages; i++) {
1137 page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
1138 __GFP_WAIT | __GFP_HIGH :
1139 __GFP_WAIT);
1140 if (!page[i]) {
1141 if (i < LZO_CMP_PAGES) {
1142 ring_size = i;
1143 printk(KERN_ERR
1144 "PM: Failed to allocate LZO pages\n");
1145 ret = -ENOMEM;
1146 goto out_clean;
1147 } else {
1148 break;
1149 }
1150 }
795 } 1151 }
1152 want = ring_size = i;
796 1153
797 printk(KERN_INFO 1154 printk(KERN_INFO
1155 "PM: Using %u thread(s) for decompression.\n"
798 "PM: Loading and decompressing image data (%u pages) ... ", 1156 "PM: Loading and decompressing image data (%u pages) ... ",
799 nr_to_read); 1157 nr_threads, nr_to_read);
800 m = nr_to_read / 100; 1158 m = nr_to_read / 100;
801 if (!m) 1159 if (!m)
802 m = 1; 1160 m = 1;
@@ -804,85 +1162,189 @@ static int load_image_lzo(struct swap_map_handle *handle,
804 bio = NULL; 1162 bio = NULL;
805 do_gettimeofday(&start); 1163 do_gettimeofday(&start);
806 1164
807 error = snapshot_write_next(snapshot); 1165 ret = snapshot_write_next(snapshot);
808 if (error <= 0) 1166 if (ret <= 0)
809 goto out_finish; 1167 goto out_finish;
810 1168
811 for (;;) { 1169 for(;;) {
812 error = swap_read_page(handle, page[0], NULL); /* sync */ 1170 for (i = 0; !eof && i < want; i++) {
813 if (error) 1171 ret = swap_read_page(handle, page[ring], &bio);
814 break; 1172 if (ret) {
815 1173 /*
816 cmp_len = *(size_t *)page[0]; 1174 * On real read error, finish. On end of data,
817 if (unlikely(!cmp_len || 1175 * set EOF flag and just exit the read loop.
818 cmp_len > lzo1x_worst_compress(LZO_UNC_SIZE))) { 1176 */
819 printk(KERN_ERR "PM: Invalid LZO compressed length\n"); 1177 if (handle->cur &&
820 error = -1; 1178 handle->cur->entries[handle->k]) {
821 break; 1179 goto out_finish;
1180 } else {
1181 eof = 1;
1182 break;
1183 }
1184 }
1185 if (++ring >= ring_size)
1186 ring = 0;
822 } 1187 }
1188 asked += i;
1189 want -= i;
823 1190
824 for (off = PAGE_SIZE, i = 1; 1191 /*
825 off < LZO_HEADER + cmp_len; off += PAGE_SIZE, i++) { 1192 * We are out of data, wait for some more.
826 error = swap_read_page(handle, page[i], &bio); 1193 */
827 if (error) 1194 if (!have) {
1195 if (!asked)
1196 break;
1197
1198 ret = hib_wait_on_bio_chain(&bio);
1199 if (ret)
828 goto out_finish; 1200 goto out_finish;
1201 have += asked;
1202 asked = 0;
1203 if (eof)
1204 eof = 2;
829 } 1205 }
830 1206
831 error = hib_wait_on_bio_chain(&bio); /* need all data now */ 1207 if (crc->run_threads) {
832 if (error) 1208 wait_event(crc->done, atomic_read(&crc->stop));
833 goto out_finish; 1209 atomic_set(&crc->stop, 0);
834 1210 crc->run_threads = 0;
835 for (off = 0, i = 0;
836 off < LZO_HEADER + cmp_len; off += PAGE_SIZE, i++) {
837 memcpy(cmp + off, page[i], PAGE_SIZE);
838 } 1211 }
839 1212
840 unc_len = LZO_UNC_SIZE; 1213 for (thr = 0; have && thr < nr_threads; thr++) {
841 error = lzo1x_decompress_safe(cmp + LZO_HEADER, cmp_len, 1214 data[thr].cmp_len = *(size_t *)page[pg];
842 unc, &unc_len); 1215 if (unlikely(!data[thr].cmp_len ||
843 if (error < 0) { 1216 data[thr].cmp_len >
844 printk(KERN_ERR "PM: LZO decompression failed\n"); 1217 lzo1x_worst_compress(LZO_UNC_SIZE))) {
845 break; 1218 printk(KERN_ERR
1219 "PM: Invalid LZO compressed length\n");
1220 ret = -1;
1221 goto out_finish;
1222 }
1223
1224 need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER,
1225 PAGE_SIZE);
1226 if (need > have) {
1227 if (eof > 1) {
1228 ret = -1;
1229 goto out_finish;
1230 }
1231 break;
1232 }
1233
1234 for (off = 0;
1235 off < LZO_HEADER + data[thr].cmp_len;
1236 off += PAGE_SIZE) {
1237 memcpy(data[thr].cmp + off,
1238 page[pg], PAGE_SIZE);
1239 have--;
1240 want++;
1241 if (++pg >= ring_size)
1242 pg = 0;
1243 }
1244
1245 atomic_set(&data[thr].ready, 1);
1246 wake_up(&data[thr].go);
846 } 1247 }
847 1248
848 if (unlikely(!unc_len || 1249 /*
849 unc_len > LZO_UNC_SIZE || 1250 * Wait for more data while we are decompressing.
850 unc_len & (PAGE_SIZE - 1))) { 1251 */
851 printk(KERN_ERR "PM: Invalid LZO uncompressed length\n"); 1252 if (have < LZO_CMP_PAGES && asked) {
852 error = -1; 1253 ret = hib_wait_on_bio_chain(&bio);
853 break; 1254 if (ret)
1255 goto out_finish;
1256 have += asked;
1257 asked = 0;
1258 if (eof)
1259 eof = 2;
854 } 1260 }
855 1261
856 for (off = 0; off < unc_len; off += PAGE_SIZE) { 1262 for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
857 memcpy(data_of(*snapshot), unc + off, PAGE_SIZE); 1263 wait_event(data[thr].done,
1264 atomic_read(&data[thr].stop));
1265 atomic_set(&data[thr].stop, 0);
1266
1267 ret = data[thr].ret;
858 1268
859 if (!(nr_pages % m)) 1269 if (ret < 0) {
860 printk("\b\b\b\b%3d%%", nr_pages / m); 1270 printk(KERN_ERR
861 nr_pages++; 1271 "PM: LZO decompression failed\n");
1272 goto out_finish;
1273 }
862 1274
863 error = snapshot_write_next(snapshot); 1275 if (unlikely(!data[thr].unc_len ||
864 if (error <= 0) 1276 data[thr].unc_len > LZO_UNC_SIZE ||
1277 data[thr].unc_len & (PAGE_SIZE - 1))) {
1278 printk(KERN_ERR
1279 "PM: Invalid LZO uncompressed length\n");
1280 ret = -1;
865 goto out_finish; 1281 goto out_finish;
1282 }
1283
1284 for (off = 0;
1285 off < data[thr].unc_len; off += PAGE_SIZE) {
1286 memcpy(data_of(*snapshot),
1287 data[thr].unc + off, PAGE_SIZE);
1288
1289 if (!(nr_pages % m))
1290 printk("\b\b\b\b%3d%%", nr_pages / m);
1291 nr_pages++;
1292
1293 ret = snapshot_write_next(snapshot);
1294 if (ret <= 0) {
1295 crc->run_threads = thr + 1;
1296 atomic_set(&crc->ready, 1);
1297 wake_up(&crc->go);
1298 goto out_finish;
1299 }
1300 }
866 } 1301 }
1302
1303 crc->run_threads = thr;
1304 atomic_set(&crc->ready, 1);
1305 wake_up(&crc->go);
867 } 1306 }
868 1307
869out_finish: 1308out_finish:
1309 if (crc->run_threads) {
1310 wait_event(crc->done, atomic_read(&crc->stop));
1311 atomic_set(&crc->stop, 0);
1312 }
870 do_gettimeofday(&stop); 1313 do_gettimeofday(&stop);
871 if (!error) { 1314 if (!ret) {
872 printk("\b\b\b\bdone\n"); 1315 printk("\b\b\b\bdone\n");
873 snapshot_write_finalize(snapshot); 1316 snapshot_write_finalize(snapshot);
874 if (!snapshot_image_loaded(snapshot)) 1317 if (!snapshot_image_loaded(snapshot))
875 error = -ENODATA; 1318 ret = -ENODATA;
1319 if (!ret) {
1320 if (swsusp_header->flags & SF_CRC32_MODE) {
1321 if(handle->crc32 != swsusp_header->crc32) {
1322 printk(KERN_ERR
1323 "PM: Invalid image CRC32!\n");
1324 ret = -ENODATA;
1325 }
1326 }
1327 }
876 } else 1328 } else
877 printk("\n"); 1329 printk("\n");
878 swsusp_show_speed(&start, &stop, nr_to_read, "Read"); 1330 swsusp_show_speed(&start, &stop, nr_to_read, "Read");
879 1331out_clean:
880 vfree(cmp); 1332 for (i = 0; i < ring_size; i++)
881 vfree(unc);
882 for (i = 0; i < LZO_CMP_PAGES; i++)
883 free_page((unsigned long)page[i]); 1333 free_page((unsigned long)page[i]);
1334 if (crc) {
1335 if (crc->thr)
1336 kthread_stop(crc->thr);
1337 kfree(crc);
1338 }
1339 if (data) {
1340 for (thr = 0; thr < nr_threads; thr++)
1341 if (data[thr].thr)
1342 kthread_stop(data[thr].thr);
1343 vfree(data);
1344 }
1345 if (page) vfree(page);
884 1346
885 return error; 1347 return ret;
886} 1348}
887 1349
888/** 1350/**