author    Rafael J. Wysocki <rjw@sisk.pl>    2006-03-23 05:59:59 -0500
committer Linus Torvalds <torvalds@g5.osdl.org>    2006-03-23 10:38:07 -0500
commit    f577eb30afdc68233f25d4d82b04102129262365 (patch)
tree      25d3c2fa8dfbf42fd0d4776a36166736fcc1446a /kernel
parent    2b322ce210aec74ae0d02938d3a01e29fe079469 (diff)
[PATCH] swsusp: low level interface
Introduce the low level interface that can be used for handling the
snapshot of the system memory by the in-kernel swap-writing/reading code
of swsusp and the userland interface code (to be introduced shortly).

Also change the way in which swsusp records the allocated swap pages and,
consequently, simplify the in-kernel swap-writing/reading code (this is
necessary for the userland interface too).  To this end, introduce two
helper functions in mm/swapfile.c, so that the swsusp code does not refer
directly to the swap internals.

Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Acked-by: Pavel Machek <pavel@ucw.cz>
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/power/disk.c     |  12
-rw-r--r--  kernel/power/power.h    |  26
-rw-r--r--  kernel/power/snapshot.c | 326
-rw-r--r--  kernel/power/swsusp.c   | 723
4 files changed, 599 insertions(+), 488 deletions(-)
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 0b43847dc980..4eb464b71347 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -26,9 +26,9 @@ extern suspend_disk_method_t pm_disk_mode;
 
 extern int swsusp_shrink_memory(void);
 extern int swsusp_suspend(void);
-extern int swsusp_write(struct pbe *pblist, unsigned int nr_pages);
+extern int swsusp_write(void);
 extern int swsusp_check(void);
-extern int swsusp_read(struct pbe **pblist_ptr);
+extern int swsusp_read(void);
 extern void swsusp_close(void);
 extern int swsusp_resume(void);
 
@@ -70,10 +70,6 @@ static void power_down(suspend_disk_method_t mode)
 	while(1);
 }
 
-
-static int in_suspend __nosavedata = 0;
-
-
 static inline void platform_finish(void)
 {
 	if (pm_disk_mode == PM_DISK_PLATFORM) {
@@ -145,7 +141,7 @@ int pm_suspend_disk(void)
 	if (in_suspend) {
 		device_resume();
 		pr_debug("PM: writing image.\n");
-		error = swsusp_write(pagedir_nosave, nr_copy_pages);
+		error = swsusp_write();
 		if (!error)
 			power_down(pm_disk_mode);
 		else {
@@ -216,7 +212,7 @@ static int software_resume(void)
 
 	pr_debug("PM: Reading swsusp image.\n");
 
-	if ((error = swsusp_read(&pagedir_nosave))) {
+	if ((error = swsusp_read())) {
 		swsusp_free();
 		goto Thaw;
 	}
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 388dba680841..ea7132ed029b 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -37,21 +37,31 @@ extern struct subsystem power_subsys;
 /* References to section boundaries */
 extern const void __nosave_begin, __nosave_end;
 
-extern unsigned int nr_copy_pages;
 extern struct pbe *pagedir_nosave;
 
 /* Preferred image size in bytes (default 500 MB) */
 extern unsigned long image_size;
 
+extern int in_suspend;
+
 extern asmlinkage int swsusp_arch_suspend(void);
 extern asmlinkage int swsusp_arch_resume(void);
 
 extern unsigned int count_data_pages(void);
-extern void free_pagedir(struct pbe *pblist);
-extern void release_eaten_pages(void);
-extern struct pbe *alloc_pagedir(unsigned nr_pages, gfp_t gfp_mask, int safe_needed);
 extern void swsusp_free(void);
-extern int alloc_data_pages(struct pbe *pblist, gfp_t gfp_mask, int safe_needed);
-extern unsigned int snapshot_nr_pages(void);
-extern struct pbe *snapshot_pblist(void);
-extern void snapshot_pblist_set(struct pbe *pblist);
+
+struct snapshot_handle {
+	loff_t offset;
+	unsigned int page;
+	unsigned int page_offset;
+	unsigned int prev;
+	struct pbe *pbe;
+	void *buffer;
+	unsigned int buf_offset;
+};
+
+#define data_of(handle) ((handle).buffer + (handle).buf_offset)
+
+extern int snapshot_read_next(struct snapshot_handle *handle, size_t count);
+extern int snapshot_write_next(struct snapshot_handle *handle, size_t count);
+int snapshot_image_loaded(struct snapshot_handle *handle);
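
The snapshot_handle and data_of() interface declared above is consumed strictly sequentially: every call to snapshot_read_next() or snapshot_write_next() hands out at most the number of bytes left before the next page boundary, and data_of() points at where to copy from or to. Below is a minimal userspace model of that bookkeeping, not kernel code: PAGE_SIZE is assumed to be 4096, the handle fields and the clamping mirror this patch, while the backing image buffer, the nr_pages argument and main() are invented for the example.

/* Userspace model of the snapshot_handle chunking contract (illustrative only). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

struct snapshot_handle {
	long offset;              /* bytes handed out so far (loff_t in the kernel) */
	unsigned int page;        /* index of the page being traversed              */
	unsigned int page_offset; /* offset within that page                        */
	char *buffer;             /* current page's data                            */
	unsigned int buf_offset;  /* where data_of() should point                   */
};

#define data_of(handle) ((handle).buffer + (handle).buf_offset)

/* Mimics snapshot_read_next(): never hands out bytes across a page boundary. */
static int snapshot_read_next(struct snapshot_handle *h, char *image,
			      unsigned int nr_pages, size_t count)
{
	if (h->page >= nr_pages)
		return 0;				/* end of data stream */
	h->buffer = image + (size_t)h->page * PAGE_SIZE;
	h->buf_offset = h->page_offset;
	if (h->page_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - h->page_offset;	/* clamp to the boundary */
		h->page_offset = 0;
		h->page++;
	} else {
		h->page_offset += count;
	}
	h->offset += count;
	return count;
}

int main(void)
{
	unsigned int nr_pages = 3;
	char *image = calloc(nr_pages, PAGE_SIZE);
	char sink[1000];
	struct snapshot_handle handle;
	int n;

	memset(&handle, 0, sizeof(handle));	/* the first call wants a zeroed handle */
	while ((n = snapshot_read_next(&handle, image, nr_pages, sizeof(sink))) > 0) {
		memcpy(sink, data_of(handle), n);
		printf("got %4d bytes, page %u, offset %ld\n",
		       n, handle.page, handle.offset);
	}
	free(image);
	return 0;
}

Requesting 1000 bytes at a time against 4096-byte pages shows the occasional shorter read a caller has to expect whenever a request would otherwise straddle a page.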
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 8d5a5986d621..cc349437fb72 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -10,6 +10,7 @@
  */
 
 
+#include <linux/version.h>
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/suspend.h>
@@ -34,7 +35,8 @@
 #include "power.h"
 
 struct pbe *pagedir_nosave;
-unsigned int nr_copy_pages;
+static unsigned int nr_copy_pages;
+static unsigned int nr_meta_pages;
 
 #ifdef CONFIG_HIGHMEM
 unsigned int count_highmem_pages(void)
@@ -235,7 +237,7 @@ static void copy_data_pages(struct pbe *pblist)
  * free_pagedir - free pages allocated with alloc_pagedir()
  */
 
-void free_pagedir(struct pbe *pblist)
+static void free_pagedir(struct pbe *pblist)
 {
 	struct pbe *pbe;
 
@@ -301,7 +303,7 @@ struct eaten_page {
 
 static struct eaten_page *eaten_pages = NULL;
 
-void release_eaten_pages(void)
+static void release_eaten_pages(void)
 {
 	struct eaten_page *p, *q;
 
@@ -376,7 +378,6 @@ struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask, int safe_needed
 	if (!nr_pages)
 		return NULL;
 
-	pr_debug("alloc_pagedir(): nr_pages = %d\n", nr_pages);
 	pblist = alloc_image_page(gfp_mask, safe_needed);
 	/* FIXME: rewrite this ugly loop */
 	for (pbe = pblist, num = PBES_PER_PAGE; pbe && num < nr_pages;
@@ -414,6 +415,9 @@ void swsusp_free(void)
 			}
 		}
 	}
+	nr_copy_pages = 0;
+	nr_meta_pages = 0;
+	pagedir_nosave = NULL;
 }
 
 
@@ -437,7 +441,7 @@ static int enough_free_mem(unsigned int nr_pages)
 		(nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
 }
 
-int alloc_data_pages(struct pbe *pblist, gfp_t gfp_mask, int safe_needed)
+static int alloc_data_pages(struct pbe *pblist, gfp_t gfp_mask, int safe_needed)
 {
 	struct pbe *p;
 
@@ -504,7 +508,319 @@ asmlinkage int swsusp_save(void)
  */
 
 	nr_copy_pages = nr_pages;
+	nr_meta_pages = (nr_pages * sizeof(long) + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
 	printk("swsusp: critical section/: done (%d pages copied)\n", nr_pages);
 	return 0;
 }
+
+static void init_header(struct swsusp_info *info)
+{
+	memset(info, 0, sizeof(struct swsusp_info));
+	info->version_code = LINUX_VERSION_CODE;
+	info->num_physpages = num_physpages;
+	memcpy(&info->uts, &system_utsname, sizeof(system_utsname));
+	info->cpus = num_online_cpus();
+	info->image_pages = nr_copy_pages;
+	info->pages = nr_copy_pages + nr_meta_pages + 1;
+}
+
+/**
+ * pack_orig_addresses - the .orig_address fields of the PBEs from the
+ * list starting at @pbe are stored in the array @buf[] (1 page)
+ */
+
+static inline struct pbe *pack_orig_addresses(unsigned long *buf, struct pbe *pbe)
+{
+	int j;
+
+	for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
+		buf[j] = pbe->orig_address;
+		pbe = pbe->next;
+	}
+	if (!pbe)
+		for (; j < PAGE_SIZE / sizeof(long); j++)
+			buf[j] = 0;
+	return pbe;
+}
+
+/**
+ * snapshot_read_next - used for reading the system memory snapshot.
+ *
+ * On the first call to it @handle should point to a zeroed
+ * snapshot_handle structure. The structure gets updated and a pointer
+ * to it should be passed to this function every next time.
+ *
+ * The @count parameter should contain the number of bytes the caller
+ * wants to read from the snapshot. It must not be zero.
+ *
+ * On success the function returns a positive number. Then, the caller
+ * is allowed to read up to the returned number of bytes from the memory
+ * location computed by the data_of() macro. The number returned
+ * may be smaller than @count, but this only happens if the read would
+ * cross a page boundary otherwise.
+ *
+ * The function returns 0 to indicate the end of data stream condition,
+ * and a negative number is returned on error. In such cases the
+ * structure pointed to by @handle is not updated and should not be used
+ * any more.
+ */
+
+int snapshot_read_next(struct snapshot_handle *handle, size_t count)
+{
+	static unsigned long *buffer;
+
+	if (handle->page > nr_meta_pages + nr_copy_pages)
+		return 0;
+	if (!buffer) {
+		/* This makes the buffer be freed by swsusp_free() */
+		buffer = alloc_image_page(GFP_ATOMIC, 0);
+		if (!buffer)
+			return -ENOMEM;
+	}
+	if (!handle->offset) {
+		init_header((struct swsusp_info *)buffer);
+		handle->buffer = buffer;
+		handle->pbe = pagedir_nosave;
+	}
+	if (handle->prev < handle->page) {
+		if (handle->page <= nr_meta_pages) {
+			handle->pbe = pack_orig_addresses(buffer, handle->pbe);
+			if (!handle->pbe)
+				handle->pbe = pagedir_nosave;
+		} else {
+			handle->buffer = (void *)handle->pbe->address;
+			handle->pbe = handle->pbe->next;
+		}
+		handle->prev = handle->page;
+	}
+	handle->buf_offset = handle->page_offset;
+	if (handle->page_offset + count >= PAGE_SIZE) {
+		count = PAGE_SIZE - handle->page_offset;
+		handle->page_offset = 0;
+		handle->page++;
+	} else {
+		handle->page_offset += count;
+	}
+	handle->offset += count;
+	return count;
+}
+
+/**
+ * mark_unsafe_pages - mark the pages that cannot be used for storing
+ * the image during resume, because they conflict with the pages that
+ * had been used before suspend
+ */
+
+static int mark_unsafe_pages(struct pbe *pblist)
+{
+	struct zone *zone;
+	unsigned long zone_pfn;
+	struct pbe *p;
+
+	if (!pblist) /* a sanity check */
+		return -EINVAL;
+
+	/* Clear page flags */
+	for_each_zone (zone) {
+		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
+			if (pfn_valid(zone_pfn + zone->zone_start_pfn))
+				ClearPageNosaveFree(pfn_to_page(zone_pfn +
+					zone->zone_start_pfn));
+	}
+
+	/* Mark orig addresses */
+	for_each_pbe (p, pblist) {
+		if (virt_addr_valid(p->orig_address))
+			SetPageNosaveFree(virt_to_page(p->orig_address));
+		else
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+static void copy_page_backup_list(struct pbe *dst, struct pbe *src)
+{
+	/* We assume both lists contain the same number of elements */
+	while (src) {
+		dst->orig_address = src->orig_address;
+		dst = dst->next;
+		src = src->next;
+	}
+}
+
+static int check_header(struct swsusp_info *info)
+{
+	char *reason = NULL;
+
+	if (info->version_code != LINUX_VERSION_CODE)
+		reason = "kernel version";
+	if (info->num_physpages != num_physpages)
+		reason = "memory size";
+	if (strcmp(info->uts.sysname,system_utsname.sysname))
+		reason = "system type";
+	if (strcmp(info->uts.release,system_utsname.release))
+		reason = "kernel release";
+	if (strcmp(info->uts.version,system_utsname.version))
+		reason = "version";
+	if (strcmp(info->uts.machine,system_utsname.machine))
+		reason = "machine";
+	if (reason) {
+		printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason);
+		return -EPERM;
+	}
+	return 0;
+}
+
+/**
+ * load header - check the image header and copy data from it
+ */
+
+static int load_header(struct snapshot_handle *handle,
+                       struct swsusp_info *info)
+{
+	int error;
+	struct pbe *pblist;
+
+	error = check_header(info);
+	if (!error) {
+		pblist = alloc_pagedir(info->image_pages, GFP_ATOMIC, 0);
+		if (!pblist)
+			return -ENOMEM;
+		pagedir_nosave = pblist;
+		handle->pbe = pblist;
+		nr_copy_pages = info->image_pages;
+		nr_meta_pages = info->pages - info->image_pages - 1;
+	}
+	return error;
+}
+
+/**
+ * unpack_orig_addresses - copy the elements of @buf[] (1 page) to
+ * the PBEs in the list starting at @pbe
+ */
+
+static inline struct pbe *unpack_orig_addresses(unsigned long *buf,
+                                                struct pbe *pbe)
+{
+	int j;
+
+	for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
+		pbe->orig_address = buf[j];
+		pbe = pbe->next;
+	}
+	return pbe;
+}
+
+/**
+ * create_image - use metadata contained in the PBE list
+ * pointed to by pagedir_nosave to mark the pages that will
+ * be overwritten in the process of restoring the system
+ * memory state from the image and allocate memory for
+ * the image avoiding these pages
+ */
+
+static int create_image(struct snapshot_handle *handle)
+{
+	int error = 0;
+	struct pbe *p, *pblist;
+
+	p = pagedir_nosave;
+	error = mark_unsafe_pages(p);
+	if (!error) {
+		pblist = alloc_pagedir(nr_copy_pages, GFP_ATOMIC, 1);
+		if (pblist)
+			copy_page_backup_list(pblist, p);
+		free_pagedir(p);
+		if (!pblist)
+			error = -ENOMEM;
+	}
+	if (!error)
+		error = alloc_data_pages(pblist, GFP_ATOMIC, 1);
+	if (!error) {
+		release_eaten_pages();
+		pagedir_nosave = pblist;
+	} else {
+		pagedir_nosave = NULL;
+		handle->pbe = NULL;
+		nr_copy_pages = 0;
+		nr_meta_pages = 0;
+	}
+	return error;
+}
+
+/**
+ * snapshot_write_next - used for writing the system memory snapshot.
+ *
+ * On the first call to it @handle should point to a zeroed
+ * snapshot_handle structure. The structure gets updated and a pointer
+ * to it should be passed to this function every next time.
+ *
+ * The @count parameter should contain the number of bytes the caller
+ * wants to write to the image. It must not be zero.
+ *
+ * On success the function returns a positive number. Then, the caller
+ * is allowed to write up to the returned number of bytes to the memory
+ * location computed by the data_of() macro. The number returned
+ * may be smaller than @count, but this only happens if the write would
+ * cross a page boundary otherwise.
+ *
+ * The function returns 0 to indicate the "end of file" condition,
+ * and a negative number is returned on error. In such cases the
+ * structure pointed to by @handle is not updated and should not be used
+ * any more.
+ */
+
+int snapshot_write_next(struct snapshot_handle *handle, size_t count)
+{
+	static unsigned long *buffer;
+	int error = 0;
+
+	if (handle->prev && handle->page > nr_meta_pages + nr_copy_pages)
+		return 0;
+	if (!buffer) {
+		/* This makes the buffer be freed by swsusp_free() */
+		buffer = alloc_image_page(GFP_ATOMIC, 0);
+		if (!buffer)
+			return -ENOMEM;
+	}
+	if (!handle->offset)
+		handle->buffer = buffer;
+	if (handle->prev < handle->page) {
+		if (!handle->prev) {
+			error = load_header(handle, (struct swsusp_info *)buffer);
+			if (error)
+				return error;
+		} else if (handle->prev <= nr_meta_pages) {
+			handle->pbe = unpack_orig_addresses(buffer, handle->pbe);
+			if (!handle->pbe) {
+				error = create_image(handle);
+				if (error)
+					return error;
+				handle->pbe = pagedir_nosave;
+				handle->buffer = (void *)handle->pbe->address;
+			}
+		} else {
+			handle->pbe = handle->pbe->next;
+			handle->buffer = (void *)handle->pbe->address;
+		}
+		handle->prev = handle->page;
+	}
+	handle->buf_offset = handle->page_offset;
+	if (handle->page_offset + count >= PAGE_SIZE) {
+		count = PAGE_SIZE - handle->page_offset;
+		handle->page_offset = 0;
+		handle->page++;
+	} else {
+		handle->page_offset += count;
+	}
+	handle->offset += count;
+	return count;
+}
+
+int snapshot_image_loaded(struct snapshot_handle *handle)
+{
+	return !(!handle->pbe || handle->pbe->next || !nr_copy_pages ||
+		handle->page <= nr_meta_pages + nr_copy_pages);
+}
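
pack_orig_addresses() and unpack_orig_addresses() above are inverses of each other: each metadata page carries PAGE_SIZE/sizeof(long) original addresses, zero-padded at the tail, which is also where the nr_meta_pages = (nr_pages * sizeof(long) + PAGE_SIZE - 1) >> PAGE_SHIFT sizing in swsusp_save() comes from. The following userspace sketch round-trips that packing over a toy pbe list (PAGE_SIZE assumed 4096 with 8-byte longs; the arrays, names and main() are made up for the illustration).

/* Round-trip the metadata packing used by the snapshot code (illustrative only). */
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE      4096
#define ADDRS_PER_PAGE (PAGE_SIZE / sizeof(long))

struct pbe {			/* toy stand-in for the kernel's page backup entry */
	unsigned long orig_address;
	struct pbe *next;
};

/* Store one page worth of .orig_address values into buf[], zero-pad the rest. */
static struct pbe *pack_orig_addresses(unsigned long *buf, struct pbe *pbe)
{
	size_t j;

	for (j = 0; j < ADDRS_PER_PAGE && pbe; j++) {
		buf[j] = pbe->orig_address;
		pbe = pbe->next;
	}
	if (!pbe)
		for (; j < ADDRS_PER_PAGE; j++)
			buf[j] = 0;
	return pbe;
}

/* Copy one page worth of addresses from buf[] back into the PBE list. */
static struct pbe *unpack_orig_addresses(unsigned long *buf, struct pbe *pbe)
{
	size_t j;

	for (j = 0; j < ADDRS_PER_PAGE && pbe; j++) {
		pbe->orig_address = buf[j];
		pbe = pbe->next;
	}
	return pbe;
}

int main(void)
{
	enum { NR_PAGES = 1000 };
	struct pbe src[NR_PAGES], dst[NR_PAGES];
	unsigned long buf[ADDRS_PER_PAGE];
	struct pbe *s, *d;
	unsigned int meta_pages = 0;
	int i;

	for (i = 0; i < NR_PAGES; i++) {
		src[i].orig_address = 0x10000 + i * PAGE_SIZE;
		src[i].next = (i + 1 < NR_PAGES) ? &src[i + 1] : NULL;
		dst[i].orig_address = 0;
		dst[i].next = src[i].next ? &dst[i + 1] : NULL;
	}

	s = src;
	d = dst;
	while (s || d) {		/* one iteration per metadata page */
		s = pack_orig_addresses(buf, s);
		d = unpack_orig_addresses(buf, d);
		meta_pages++;
	}
	for (i = 0; i < NR_PAGES; i++)
		assert(dst[i].orig_address == src[i].orig_address);

	/* Matches nr_meta_pages as computed in swsusp_save(). */
	printf("%d addresses need %u metadata page(s)\n", NR_PAGES, meta_pages);
	return 0;
}

With 1000 pages the loop runs twice, matching ceil(1000 * 8 / 4096) = 2 metadata pages.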
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 4e90905f0e87..457084f50010 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -77,6 +77,8 @@
  */
 unsigned long image_size = 500 * 1024 * 1024;
 
+int in_suspend __nosavedata = 0;
+
 #ifdef CONFIG_HIGHMEM
 unsigned int count_highmem_pages(void);
 int save_highmem(void);
@@ -98,8 +100,6 @@ static struct swsusp_header {
 	char sig[10];
 } __attribute__((packed, aligned(PAGE_SIZE))) swsusp_header;
 
-static struct swsusp_info swsusp_info;
-
 /*
  * Saving part...
  */
@@ -129,255 +129,261 @@ static int mark_swapfiles(swp_entry_t start)
 	return error;
 }
 
-/*
- * Check whether the swap device is the specified resume
- * device, irrespective of whether they are specified by
- * identical names.
- *
- * (Thus, device inode aliasing is allowed. You can say /dev/hda4
- * instead of /dev/ide/host0/bus0/target0/lun0/part4 [if using devfs]
- * and they'll be considered the same device. This is *necessary* for
- * devfs, since the resume code can only recognize the form /dev/hda4,
- * but the suspend code would see the long name.)
+/**
+ * swsusp_swap_check - check if the resume device is a swap device
+ * and get its index (if so)
  */
-static inline int is_resume_device(const struct swap_info_struct *swap_info)
-{
-	struct file *file = swap_info->swap_file;
-	struct inode *inode = file->f_dentry->d_inode;
-
-	return S_ISBLK(inode->i_mode) &&
-		swsusp_resume_device == MKDEV(imajor(inode), iminor(inode));
-}
 
 static int swsusp_swap_check(void) /* This is called before saving image */
 {
-	int i;
+	int res = swap_type_of(swsusp_resume_device);
 
-	if (!swsusp_resume_device)
-		return -ENODEV;
-	spin_lock(&swap_lock);
-	for (i = 0; i < MAX_SWAPFILES; i++) {
-		if (!(swap_info[i].flags & SWP_WRITEOK))
-			continue;
-		if (is_resume_device(swap_info + i)) {
-			spin_unlock(&swap_lock);
-			root_swap = i;
-			return 0;
-		}
+	if (res >= 0) {
+		root_swap = res;
+		return 0;
 	}
-	spin_unlock(&swap_lock);
-	return -ENODEV;
+	return res;
 }
 
 /**
- * write_page - Write one page to a fresh swap location.
- * @addr: Address we're writing.
- * @loc: Place to store the entry we used.
+ * The bitmap is used for tracing allocated swap pages
  *
- * Allocate a new swap entry and 'sync' it. Note we discard -EIO
- * errors. That is an artifact left over from swsusp. It did not
- * check the return of rw_swap_page_sync() at all, since most pages
- * written back to swap would return -EIO.
- * This is a partial improvement, since we will at least return other
- * errors, though we need to eventually fix the damn code.
+ * The entire bitmap consists of a number of bitmap_page
+ * structures linked with the help of the .next member.
+ * Thus each page can be allocated individually, so we only
+ * need to make 0-order memory allocations to create
+ * the bitmap.
  */
-static int write_page(unsigned long addr, swp_entry_t *loc)
-{
-	swp_entry_t entry;
-	int error = -ENOSPC;
 
-	entry = get_swap_page_of_type(root_swap);
-	if (swp_offset(entry)) {
-		error = rw_swap_page_sync(WRITE, entry, virt_to_page(addr));
-		if (!error || error == -EIO)
-			*loc = entry;
-	}
-	return error;
-}
+#define BITMAP_PAGE_SIZE	(PAGE_SIZE - sizeof(void *))
+#define BITMAP_PAGE_CHUNKS	(BITMAP_PAGE_SIZE / sizeof(long))
+#define BITS_PER_CHUNK		(sizeof(long) * 8)
+#define BITMAP_PAGE_BITS	(BITMAP_PAGE_CHUNKS * BITS_PER_CHUNK)
+
+struct bitmap_page {
+	unsigned long		chunks[BITMAP_PAGE_CHUNKS];
+	struct bitmap_page	*next;
+};
 
 /**
- * Swap map-handling functions
+ * The following functions are used for tracing the allocated
+ * swap pages, so that they can be freed in case of an error.
  *
- * The swap map is a data structure used for keeping track of each page
- * written to the swap. It consists of many swap_map_page structures
- * that contain each an array of MAP_PAGE_SIZE swap entries.
- * These structures are linked together with the help of either the
- * .next (in memory) or the .next_swap (in swap) member.
- *
- * The swap map is created during suspend. At that time we need to keep
- * it in memory, because we have to free all of the allocated swap
- * entries if an error occurs. The memory needed is preallocated
- * so that we know in advance if there's enough of it.
- *
- * The first swap_map_page structure is filled with the swap entries that
- * correspond to the first MAP_PAGE_SIZE data pages written to swap and
- * so on. After the all of the data pages have been written, the order
- * of the swap_map_page structures in the map is reversed so that they
- * can be read from swap in the original order. This causes the data
- * pages to be loaded in exactly the same order in which they have been
- * saved.
- *
- * During resume we only need to use one swap_map_page structure
- * at a time, which means that we only need to use two memory pages for
- * reading the image - one for reading the swap_map_page structures
- * and the second for reading the data pages from swap.
+ * The functions operate on a linked bitmap structure defined
+ * above
  */
 
-#define MAP_PAGE_SIZE	((PAGE_SIZE - sizeof(swp_entry_t) - sizeof(void *)) \
-			/ sizeof(swp_entry_t))
-
-struct swap_map_page {
-	swp_entry_t	entries[MAP_PAGE_SIZE];
-	swp_entry_t	next_swap;
-	struct swap_map_page *next;
-};
-
-static inline void free_swap_map(struct swap_map_page *swap_map)
+static void free_bitmap(struct bitmap_page *bitmap)
 {
-	struct swap_map_page *swp;
+	struct bitmap_page *bp;
 
-	while (swap_map) {
-		swp = swap_map->next;
-		free_page((unsigned long)swap_map);
-		swap_map = swp;
+	while (bitmap) {
+		bp = bitmap->next;
+		free_page((unsigned long)bitmap);
+		bitmap = bp;
 	}
 }
 
-static struct swap_map_page *alloc_swap_map(unsigned int nr_pages)
+static struct bitmap_page *alloc_bitmap(unsigned int nr_bits)
 {
-	struct swap_map_page *swap_map, *swp;
-	unsigned n = 0;
+	struct bitmap_page *bitmap, *bp;
+	unsigned int n;
 
-	if (!nr_pages)
+	if (!nr_bits)
 		return NULL;
 
-	pr_debug("alloc_swap_map(): nr_pages = %d\n", nr_pages);
-	swap_map = (struct swap_map_page *)get_zeroed_page(GFP_ATOMIC);
-	swp = swap_map;
-	for (n = MAP_PAGE_SIZE; n < nr_pages; n += MAP_PAGE_SIZE) {
-		swp->next = (struct swap_map_page *)get_zeroed_page(GFP_ATOMIC);
-		swp = swp->next;
-		if (!swp) {
-			free_swap_map(swap_map);
+	bitmap = (struct bitmap_page *)get_zeroed_page(GFP_KERNEL);
+	bp = bitmap;
+	for (n = BITMAP_PAGE_BITS; n < nr_bits; n += BITMAP_PAGE_BITS) {
+		bp->next = (struct bitmap_page *)get_zeroed_page(GFP_KERNEL);
+		bp = bp->next;
+		if (!bp) {
+			free_bitmap(bitmap);
 			return NULL;
 		}
 	}
-	return swap_map;
+	return bitmap;
 }
 
-/**
- * reverse_swap_map - reverse the order of pages in the swap map
- * @swap_map
- */
-
-static inline struct swap_map_page *reverse_swap_map(struct swap_map_page *swap_map)
+static int bitmap_set(struct bitmap_page *bitmap, unsigned long bit)
 {
-	struct swap_map_page *prev, *next;
+	unsigned int n;
 
-	prev = NULL;
-	while (swap_map) {
-		next = swap_map->next;
-		swap_map->next = prev;
-		prev = swap_map;
-		swap_map = next;
+	n = BITMAP_PAGE_BITS;
+	while (bitmap && n <= bit) {
+		n += BITMAP_PAGE_BITS;
+		bitmap = bitmap->next;
 	}
-	return prev;
+	if (!bitmap)
+		return -EINVAL;
+	n -= BITMAP_PAGE_BITS;
+	bit -= n;
+	n = 0;
+	while (bit >= BITS_PER_CHUNK) {
+		bit -= BITS_PER_CHUNK;
+		n++;
+	}
+	bitmap->chunks[n] |= (1UL << bit);
+	return 0;
 }
 
-/**
- * free_swap_map_entries - free the swap entries allocated to store
- * the swap map @swap_map (this is only called in case of an error)
- */
-static inline void free_swap_map_entries(struct swap_map_page *swap_map)
+static unsigned long alloc_swap_page(int swap, struct bitmap_page *bitmap)
 {
-	while (swap_map) {
-		if (swap_map->next_swap.val)
-			swap_free(swap_map->next_swap);
-		swap_map = swap_map->next;
+	unsigned long offset;
+
+	offset = swp_offset(get_swap_page_of_type(swap));
+	if (offset) {
+		if (bitmap_set(bitmap, offset)) {
+			swap_free(swp_entry(swap, offset));
+			offset = 0;
+		}
 	}
+	return offset;
 }
 
-/**
- * save_swap_map - save the swap map used for tracing the data pages
- * stored in the swap
- */
-
-static int save_swap_map(struct swap_map_page *swap_map, swp_entry_t *start)
+static void free_all_swap_pages(int swap, struct bitmap_page *bitmap)
 {
-	swp_entry_t entry = (swp_entry_t){0};
-	int error;
+	unsigned int bit, n;
+	unsigned long test;
 
-	while (swap_map) {
-		swap_map->next_swap = entry;
-		if ((error = write_page((unsigned long)swap_map, &entry)))
-			return error;
-		swap_map = swap_map->next;
+	bit = 0;
+	while (bitmap) {
+		for (n = 0; n < BITMAP_PAGE_CHUNKS; n++)
+			for (test = 1UL; test; test <<= 1) {
+				if (bitmap->chunks[n] & test)
+					swap_free(swp_entry(swap, bit));
+				bit++;
+			}
+		bitmap = bitmap->next;
 	}
-	*start = entry;
-	return 0;
 }
 
 /**
- * free_image_entries - free the swap entries allocated to store
- * the image data pages (this is only called in case of an error)
+ * write_page - Write one page to given swap location.
+ * @buf: Address we're writing.
+ * @offset: Offset of the swap page we're writing to.
  */
 
-static inline void free_image_entries(struct swap_map_page *swp)
+static int write_page(void *buf, unsigned long offset)
 {
-	unsigned k;
+	swp_entry_t entry;
+	int error = -ENOSPC;
 
-	while (swp) {
-		for (k = 0; k < MAP_PAGE_SIZE; k++)
-			if (swp->entries[k].val)
-				swap_free(swp->entries[k]);
-		swp = swp->next;
+	if (offset) {
+		entry = swp_entry(root_swap, offset);
+		error = rw_swap_page_sync(WRITE, entry, virt_to_page(buf));
 	}
+	return error;
 }
 
+/*
+ * The swap map is a data structure used for keeping track of each page
+ * written to a swap partition. It consists of many swap_map_page
+ * structures that contain each an array of MAP_PAGE_SIZE swap entries.
+ * These structures are stored on the swap and linked together with the
+ * help of the .next_swap member.
+ *
+ * The swap map is created during suspend. The swap map pages are
+ * allocated and populated one at a time, so we only need one memory
+ * page to set up the entire structure.
+ *
+ * During resume we also only need to use one swap_map_page structure
+ * at a time.
+ */
+
+#define MAP_PAGE_ENTRIES	(PAGE_SIZE / sizeof(long) - 1)
+
+struct swap_map_page {
+	unsigned long		entries[MAP_PAGE_ENTRIES];
+	unsigned long		next_swap;
+};
+
 /**
- * The swap_map_handle structure is used for handling the swap map in
+ * The swap_map_handle structure is used for handling swap in
  * a file-alike way
  */
 
 struct swap_map_handle {
 	struct swap_map_page *cur;
+	unsigned long cur_swap;
+	struct bitmap_page *bitmap;
 	unsigned int k;
 };
 
-static inline void init_swap_map_handle(struct swap_map_handle *handle,
-                                        struct swap_map_page *map)
+static void release_swap_writer(struct swap_map_handle *handle)
 {
-	handle->cur = map;
+	if (handle->cur)
+		free_page((unsigned long)handle->cur);
+	handle->cur = NULL;
+	if (handle->bitmap)
+		free_bitmap(handle->bitmap);
+	handle->bitmap = NULL;
+}
+
+static int get_swap_writer(struct swap_map_handle *handle)
+{
+	handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
+	if (!handle->cur)
+		return -ENOMEM;
+	handle->bitmap = alloc_bitmap(count_swap_pages(root_swap, 0));
+	if (!handle->bitmap) {
+		release_swap_writer(handle);
+		return -ENOMEM;
+	}
+	handle->cur_swap = alloc_swap_page(root_swap, handle->bitmap);
+	if (!handle->cur_swap) {
+		release_swap_writer(handle);
+		return -ENOSPC;
+	}
 	handle->k = 0;
+	return 0;
 }
 
-static inline int swap_map_write_page(struct swap_map_handle *handle,
-                                      unsigned long addr)
+static int swap_write_page(struct swap_map_handle *handle, void *buf)
 {
 	int error;
+	unsigned long offset;
 
-	error = write_page(addr, handle->cur->entries + handle->k);
+	if (!handle->cur)
+		return -EINVAL;
+	offset = alloc_swap_page(root_swap, handle->bitmap);
+	error = write_page(buf, offset);
 	if (error)
 		return error;
-	if (++handle->k >= MAP_PAGE_SIZE) {
-		handle->cur = handle->cur->next;
+	handle->cur->entries[handle->k++] = offset;
+	if (handle->k >= MAP_PAGE_ENTRIES) {
+		offset = alloc_swap_page(root_swap, handle->bitmap);
+		if (!offset)
+			return -ENOSPC;
+		handle->cur->next_swap = offset;
+		error = write_page(handle->cur, handle->cur_swap);
+		if (error)
+			return error;
+		memset(handle->cur, 0, PAGE_SIZE);
+		handle->cur_swap = offset;
 		handle->k = 0;
 	}
 	return 0;
 }
 
+static int flush_swap_writer(struct swap_map_handle *handle)
+{
+	if (handle->cur && handle->cur_swap)
+		return write_page(handle->cur, handle->cur_swap);
+	else
+		return -EINVAL;
+}
+
 /**
- * save_image_data - save the data pages pointed to by the PBEs
- * from the list @pblist using the swap map handle @handle
- * (assume there are @nr_pages data pages to save)
+ * save_image - save the suspend image data
  */
 
-static int save_image_data(struct pbe *pblist,
-                           struct swap_map_handle *handle,
-                           unsigned int nr_pages)
+static int save_image(struct swap_map_handle *handle,
+                      struct snapshot_handle *snapshot,
+                      unsigned int nr_pages)
 {
 	unsigned int m;
-	struct pbe *p;
+	int ret;
 	int error = 0;
 
 	printk("Saving image data pages (%u pages) ... ", nr_pages);
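
The hunk above replaces the in-memory swap map with a chained bitmap as the structure used for undoing swap allocations on error: every offset returned by alloc_swap_page() is recorded as one bit, BITMAP_PAGE_BITS bits per bitmap_page, and free_all_swap_pages() simply walks the chain. The following is a userspace sketch of the same chunk/bit arithmetic as bitmap_set() (64-bit longs and PAGE_SIZE 4096 assumed; the kernel page allocator is replaced by calloc and the inner loop is folded into a division and a modulo).

/* Userspace sketch of the bitmap_set() arithmetic from the patch (illustrative only). */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE          4096
#define BITMAP_PAGE_SIZE   (PAGE_SIZE - sizeof(void *))
#define BITMAP_PAGE_CHUNKS (BITMAP_PAGE_SIZE / sizeof(long))
#define BITS_PER_CHUNK     (sizeof(long) * 8)
#define BITMAP_PAGE_BITS   (BITMAP_PAGE_CHUNKS * BITS_PER_CHUNK)

struct bitmap_page {
	unsigned long chunks[BITMAP_PAGE_CHUNKS];
	struct bitmap_page *next;
};

/* Same walk as the kernel's bitmap_set(): skip whole pages, then locate chunk and bit. */
static int bitmap_set(struct bitmap_page *bitmap, unsigned long bit)
{
	unsigned long n = BITMAP_PAGE_BITS;

	while (bitmap && n <= bit) {
		n += BITMAP_PAGE_BITS;
		bitmap = bitmap->next;
	}
	if (!bitmap)
		return -1;			/* offset beyond the bitmap */
	bit -= n - BITMAP_PAGE_BITS;		/* bit index within this page */
	bitmap->chunks[bit / BITS_PER_CHUNK] |= 1UL << (bit % BITS_PER_CHUNK);
	return 0;
}

int main(void)
{
	/* Two chained bitmap pages cover 2 * BITMAP_PAGE_BITS swap offsets. */
	struct bitmap_page *first = calloc(1, sizeof(*first));
	struct bitmap_page *second = calloc(1, sizeof(*second));
	unsigned long offset = BITMAP_PAGE_BITS + 12345;
	unsigned long in_page = offset - BITMAP_PAGE_BITS;	/* lands in the second page */

	first->next = second;
	if (bitmap_set(first, offset) == 0)
		printf("offset %lu -> page 2, chunk %lu, bit %lu\n",
		       offset,
		       (unsigned long)(in_page / BITS_PER_CHUNK),
		       (unsigned long)(in_page % BITS_PER_CHUNK));
	free(second);
	free(first);
	return 0;
}

With these sizes one bitmap_page tracks 511 * 64 = 32704 swap offsets before the walk moves on to .next.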
@@ -385,98 +391,22 @@ static int save_image_data(struct pbe *pblist,
 	if (!m)
 		m = 1;
 	nr_pages = 0;
-	for_each_pbe (p, pblist) {
-		error = swap_map_write_page(handle, p->address);
-		if (error)
-			break;
-		if (!(nr_pages % m))
-			printk("\b\b\b\b%3d%%", nr_pages / m);
-		nr_pages++;
-	}
+	do {
+		ret = snapshot_read_next(snapshot, PAGE_SIZE);
+		if (ret > 0) {
+			error = swap_write_page(handle, data_of(*snapshot));
+			if (error)
+				break;
+			if (!(nr_pages % m))
+				printk("\b\b\b\b%3d%%", nr_pages / m);
+			nr_pages++;
+		}
+	} while (ret > 0);
 	if (!error)
 		printk("\b\b\b\bdone\n");
 	return error;
 }
 
-static void dump_info(void)
-{
-	pr_debug(" swsusp: Version: %u\n",swsusp_info.version_code);
-	pr_debug(" swsusp: Num Pages: %ld\n",swsusp_info.num_physpages);
-	pr_debug(" swsusp: UTS Sys: %s\n",swsusp_info.uts.sysname);
-	pr_debug(" swsusp: UTS Node: %s\n",swsusp_info.uts.nodename);
-	pr_debug(" swsusp: UTS Release: %s\n",swsusp_info.uts.release);
-	pr_debug(" swsusp: UTS Version: %s\n",swsusp_info.uts.version);
-	pr_debug(" swsusp: UTS Machine: %s\n",swsusp_info.uts.machine);
-	pr_debug(" swsusp: UTS Domain: %s\n",swsusp_info.uts.domainname);
-	pr_debug(" swsusp: CPUs: %d\n",swsusp_info.cpus);
-	pr_debug(" swsusp: Image: %ld Pages\n",swsusp_info.image_pages);
-	pr_debug(" swsusp: Total: %ld Pages\n", swsusp_info.pages);
-}
-
-static void init_header(unsigned int nr_pages)
-{
-	memset(&swsusp_info, 0, sizeof(swsusp_info));
-	swsusp_info.version_code = LINUX_VERSION_CODE;
-	swsusp_info.num_physpages = num_physpages;
-	memcpy(&swsusp_info.uts, &system_utsname, sizeof(system_utsname));
-
-	swsusp_info.cpus = num_online_cpus();
-	swsusp_info.image_pages = nr_pages;
-	swsusp_info.pages = nr_pages +
-		((nr_pages * sizeof(long) + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
-}
-
-/**
- * pack_orig_addresses - the .orig_address fields of the PBEs from the
- * list starting at @pbe are stored in the array @buf[] (1 page)
- */
-
-static inline struct pbe *pack_orig_addresses(unsigned long *buf,
-                                              struct pbe *pbe)
-{
-	int j;
-
-	for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
-		buf[j] = pbe->orig_address;
-		pbe = pbe->next;
-	}
-	if (!pbe)
-		for (; j < PAGE_SIZE / sizeof(long); j++)
-			buf[j] = 0;
-	return pbe;
-}
-
-/**
- * save_image_metadata - save the .orig_address fields of the PBEs
- * from the list @pblist using the swap map handle @handle
- */
-
-static int save_image_metadata(struct pbe *pblist,
-                               struct swap_map_handle *handle)
-{
-	unsigned long *buf;
-	unsigned int n = 0;
-	struct pbe *p;
-	int error = 0;
-
-	printk("Saving image metadata ... ");
-	buf = (unsigned long *)get_zeroed_page(GFP_ATOMIC);
-	if (!buf)
-		return -ENOMEM;
-	p = pblist;
-	while (p) {
-		p = pack_orig_addresses(buf, p);
-		error = swap_map_write_page(handle, (unsigned long)buf);
-		if (error)
-			break;
-		n++;
-	}
-	free_page((unsigned long)buf);
-	if (!error)
-		printk("done (%u pages saved)\n", n);
-	return error;
-}
-
 /**
  * enough_swap - Make sure we have enough swap to save the image.
  *
@@ -486,8 +416,7 @@ static int save_image_metadata(struct pbe *pblist,
 
 static int enough_swap(unsigned int nr_pages)
 {
-	unsigned int free_swap = swap_info[root_swap].pages -
-		swap_info[root_swap].inuse_pages;
+	unsigned int free_swap = count_swap_pages(root_swap, 1);
 
 	pr_debug("swsusp: free swap pages: %u\n", free_swap);
 	return free_swap > (nr_pages + PAGES_FOR_IO +
@@ -503,57 +432,44 @@ static int enough_swap(unsigned int nr_pages)
  * correctly, we'll mark system clean, anyway.)
  */
 
-int swsusp_write(struct pbe *pblist, unsigned int nr_pages)
+int swsusp_write(void)
 {
-	struct swap_map_page *swap_map;
 	struct swap_map_handle handle;
-	swp_entry_t start;
+	struct snapshot_handle snapshot;
+	struct swsusp_info *header;
+	unsigned long start;
 	int error;
 
 	if ((error = swsusp_swap_check())) {
 		printk(KERN_ERR "swsusp: Cannot find swap device, try swapon -a.\n");
 		return error;
 	}
-	if (!enough_swap(nr_pages)) {
+	memset(&snapshot, 0, sizeof(struct snapshot_handle));
+	error = snapshot_read_next(&snapshot, PAGE_SIZE);
+	if (error < PAGE_SIZE)
+		return error < 0 ? error : -EFAULT;
+	header = (struct swsusp_info *)data_of(snapshot);
+	if (!enough_swap(header->pages)) {
 		printk(KERN_ERR "swsusp: Not enough free swap\n");
 		return -ENOSPC;
 	}
-
-	init_header(nr_pages);
-	swap_map = alloc_swap_map(swsusp_info.pages);
-	if (!swap_map)
-		return -ENOMEM;
-	init_swap_map_handle(&handle, swap_map);
-
-	error = swap_map_write_page(&handle, (unsigned long)&swsusp_info);
-	if (!error)
-		error = save_image_metadata(pblist, &handle);
+	error = get_swap_writer(&handle);
+	if (!error) {
+		start = handle.cur_swap;
+		error = swap_write_page(&handle, header);
+	}
 	if (!error)
-		error = save_image_data(pblist, &handle, nr_pages);
-	if (error)
-		goto Free_image_entries;
-
-	swap_map = reverse_swap_map(swap_map);
-	error = save_swap_map(swap_map, &start);
-	if (error)
-		goto Free_map_entries;
-
-	dump_info();
-	printk( "S" );
-	error = mark_swapfiles(start);
-	printk( "|\n" );
+		error = save_image(&handle, &snapshot, header->pages - 1);
+	if (!error) {
+		flush_swap_writer(&handle);
+		printk("S");
+		error = mark_swapfiles(swp_entry(root_swap, start));
+		printk("|\n");
+	}
 	if (error)
-		goto Free_map_entries;
-
-Free_swap_map:
-	free_swap_map(swap_map);
+		free_all_swap_pages(root_swap, handle.bitmap);
+	release_swap_writer(&handle);
 	return error;
-
-Free_map_entries:
-	free_swap_map_entries(swap_map);
-Free_image_entries:
-	free_image_entries(swap_map);
-	goto Free_swap_map;
 }
 
 /**
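
swsusp_write() above now strings the image together through swap_map_page blocks stored on swap: each block holds MAP_PAGE_ENTRIES = PAGE_SIZE / sizeof(long) - 1 swap offsets plus one next_swap link, which is the wrap-around handled by swap_write_page() earlier in this file. A quick userspace calculation of where the Nth written page's entry lands (PAGE_SIZE 4096 and 8-byte longs assumed, the sample indices are made up):

/* Where the Nth page written by swap_write_page() lands in the swap map (sketch). */
#include <stdio.h>

#define PAGE_SIZE        4096
#define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(long) - 1)   /* 511 with 8-byte longs */

int main(void)
{
	unsigned long nth[] = { 0, 510, 511, 100000 };
	size_t i;

	for (i = 0; i < sizeof(nth) / sizeof(nth[0]); i++) {
		unsigned long map_page = nth[i] / MAP_PAGE_ENTRIES;
		unsigned long slot     = nth[i] % MAP_PAGE_ENTRIES;

		/* map_page blocks are reached by following .next_swap from the first one */
		printf("image page %6lu -> swap_map_page #%lu, entries[%lu]\n",
		       nth[i], map_page, slot);
	}
	/* An image of N pages therefore needs roughly N/511 map pages on top of
	 * the data pages, overhead the writer has to leave room for on swap. */
	return 0;
}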
@@ -663,45 +579,6 @@ int swsusp_resume(void)
 	return error;
 }
 
-/**
- * mark_unsafe_pages - mark the pages that cannot be used for storing
- * the image during resume, because they conflict with the pages that
- * had been used before suspend
- */
-
-static void mark_unsafe_pages(struct pbe *pblist)
-{
-	struct zone *zone;
-	unsigned long zone_pfn;
-	struct pbe *p;
-
-	if (!pblist) /* a sanity check */
-		return;
-
-	/* Clear page flags */
-	for_each_zone (zone) {
-		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
-			if (pfn_valid(zone_pfn + zone->zone_start_pfn))
-				ClearPageNosaveFree(pfn_to_page(zone_pfn +
-					zone->zone_start_pfn));
-	}
-
-	/* Mark orig addresses */
-	for_each_pbe (p, pblist)
-		SetPageNosaveFree(virt_to_page(p->orig_address));
-
-}
-
-static void copy_page_backup_list(struct pbe *dst, struct pbe *src)
-{
-	/* We assume both lists contain the same number of elements */
-	while (src) {
-		dst->orig_address = src->orig_address;
-		dst = dst->next;
-		src = src->next;
-	}
-}
-
 /*
  * Using bio to read from swap.
  * This code requires a bit more work than just using buffer heads
@@ -779,14 +656,14 @@ static int bio_write_page(pgoff_t page_off, void *page)
  * in a file-alike way
  */
 
-static inline void release_swap_map_reader(struct swap_map_handle *handle)
+static void release_swap_reader(struct swap_map_handle *handle)
 {
 	if (handle->cur)
 		free_page((unsigned long)handle->cur);
 	handle->cur = NULL;
 }
 
-static inline int get_swap_map_reader(struct swap_map_handle *handle,
+static int get_swap_reader(struct swap_map_handle *handle,
 		swp_entry_t start)
 {
 	int error;
@@ -798,149 +675,80 @@ static inline int get_swap_map_reader(struct swap_map_handle *handle,
 		return -ENOMEM;
 	error = bio_read_page(swp_offset(start), handle->cur);
 	if (error) {
-		release_swap_map_reader(handle);
+		release_swap_reader(handle);
 		return error;
 	}
 	handle->k = 0;
 	return 0;
 }
 
-static inline int swap_map_read_page(struct swap_map_handle *handle, void *buf)
+static int swap_read_page(struct swap_map_handle *handle, void *buf)
 {
 	unsigned long offset;
 	int error;
 
 	if (!handle->cur)
 		return -EINVAL;
-	offset = swp_offset(handle->cur->entries[handle->k]);
+	offset = handle->cur->entries[handle->k];
 	if (!offset)
-		return -EINVAL;
+		return -EFAULT;
 	error = bio_read_page(offset, buf);
 	if (error)
 		return error;
-	if (++handle->k >= MAP_PAGE_SIZE) {
+	if (++handle->k >= MAP_PAGE_ENTRIES) {
 		handle->k = 0;
-		offset = swp_offset(handle->cur->next_swap);
+		offset = handle->cur->next_swap;
 		if (!offset)
-			release_swap_map_reader(handle);
+			release_swap_reader(handle);
 		else
 			error = bio_read_page(offset, handle->cur);
 	}
 	return error;
 }
 
-static int check_header(void)
-{
-	char *reason = NULL;
-
-	dump_info();
-	if (swsusp_info.version_code != LINUX_VERSION_CODE)
-		reason = "kernel version";
-	if (swsusp_info.num_physpages != num_physpages)
-		reason = "memory size";
-	if (strcmp(swsusp_info.uts.sysname,system_utsname.sysname))
-		reason = "system type";
-	if (strcmp(swsusp_info.uts.release,system_utsname.release))
-		reason = "kernel release";
-	if (strcmp(swsusp_info.uts.version,system_utsname.version))
-		reason = "version";
-	if (strcmp(swsusp_info.uts.machine,system_utsname.machine))
-		reason = "machine";
-	if (reason) {
-		printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason);
-		return -EPERM;
-	}
-	return 0;
-}
-
 /**
- * load_image_data - load the image data using the swap map handle
- * @handle and store them using the page backup list @pblist
+ * load_image - load the image using the swap map handle
+ * @handle and the snapshot handle @snapshot
  * (assume there are @nr_pages pages to load)
  */
 
-static int load_image_data(struct pbe *pblist,
-                           struct swap_map_handle *handle,
-                           unsigned int nr_pages)
+static int load_image(struct swap_map_handle *handle,
+                      struct snapshot_handle *snapshot,
+                      unsigned int nr_pages)
 {
-	int error;
 	unsigned int m;
-	struct pbe *p;
+	int ret;
+	int error = 0;
 
-	if (!pblist)
-		return -EINVAL;
 	printk("Loading image data pages (%u pages) ... ", nr_pages);
 	m = nr_pages / 100;
 	if (!m)
 		m = 1;
 	nr_pages = 0;
-	p = pblist;
-	while (p) {
-		error = swap_map_read_page(handle, (void *)p->address);
-		if (error)
-			break;
-		p = p->next;
+	do {
+		ret = snapshot_write_next(snapshot, PAGE_SIZE);
+		if (ret > 0) {
+			error = swap_read_page(handle, data_of(*snapshot));
+			if (error)
+				break;
 		if (!(nr_pages % m))
 			printk("\b\b\b\b%3d%%", nr_pages / m);
 		nr_pages++;
 		}
+	} while (ret > 0);
 	if (!error)
 		printk("\b\b\b\bdone\n");
+	if (!snapshot_image_loaded(snapshot))
+		error = -ENODATA;
 	return error;
 }
 
-/**
- * unpack_orig_addresses - copy the elements of @buf[] (1 page) to
- * the PBEs in the list starting at @pbe
- */
-
-static inline struct pbe *unpack_orig_addresses(unsigned long *buf,
-                                                struct pbe *pbe)
-{
-	int j;
-
-	for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
-		pbe->orig_address = buf[j];
-		pbe = pbe->next;
-	}
-	return pbe;
-}
-
-/**
- * load_image_metadata - load the image metadata using the swap map
- * handle @handle and put them into the PBEs in the list @pblist
- */
-
-static int load_image_metadata(struct pbe *pblist, struct swap_map_handle *handle)
-{
-	struct pbe *p;
-	unsigned long *buf;
-	unsigned int n = 0;
-	int error = 0;
-
-	printk("Loading image metadata ... ");
-	buf = (unsigned long *)get_zeroed_page(GFP_ATOMIC);
-	if (!buf)
-		return -ENOMEM;
-	p = pblist;
-	while (p) {
-		error = swap_map_read_page(handle, buf);
-		if (error)
-			break;
-		p = unpack_orig_addresses(buf, p);
-		n++;
-	}
-	free_page((unsigned long)buf);
-	if (!error)
-		printk("done (%u pages loaded)\n", n);
-	return error;
-}
-
-int swsusp_read(struct pbe **pblist_ptr)
+int swsusp_read(void)
 {
 	int error;
-	struct pbe *p, *pblist;
 	struct swap_map_handle handle;
+	struct snapshot_handle snapshot;
+	struct swsusp_info *header;
 	unsigned int nr_pages;
 
 	if (IS_ERR(resume_bdev)) {
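
On resume, swsusp_read() below feeds pages back through snapshot_write_next(), which interprets the stream using the layout fixed by init_header() in snapshot.c: page 0 is the swsusp_info header, the next nr_meta_pages pages hold packed original addresses, and the remaining nr_copy_pages pages are data, for pages = image_pages + nr_meta_pages + 1 in total. A tiny userspace helper that classifies a stream position the same way (the sizes in main() are made up for the example):

/* Classify a position in the suspend image stream (illustrative only). */
#include <stdio.h>

static const char *classify(unsigned int page, unsigned int nr_meta, unsigned int nr_copy)
{
	if (page == 0)
		return "header (struct swsusp_info)";
	if (page <= nr_meta)
		return "metadata (packed orig_address values)";
	if (page <= nr_meta + nr_copy)
		return "data page";
	return "past the end of the image";
}

int main(void)
{
	/* Example image: 20000 data pages, 40 metadata pages (made-up sizes). */
	unsigned int nr_copy = 20000, nr_meta = 40;
	unsigned int samples[] = { 0, 1, 40, 41, 20040, 20041 };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("stream page %5u: %s\n", samples[i],
		       classify(samples[i], nr_meta, nr_copy));
	printf("total pages in the image: %u\n", nr_copy + nr_meta + 1);
	return 0;
}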
@@ -948,38 +756,19 @@ int swsusp_read(struct pbe **pblist_ptr)
 		return PTR_ERR(resume_bdev);
 	}
 
-	error = get_swap_map_reader(&handle, swsusp_header.image);
+	memset(&snapshot, 0, sizeof(struct snapshot_handle));
+	error = snapshot_write_next(&snapshot, PAGE_SIZE);
+	if (error < PAGE_SIZE)
+		return error < 0 ? error : -EFAULT;
+	header = (struct swsusp_info *)data_of(snapshot);
+	error = get_swap_reader(&handle, swsusp_header.image);
 	if (!error)
-		error = swap_map_read_page(&handle, &swsusp_info);
-	if (!error)
-		error = check_header();
-	if (error)
-		return error;
-	nr_pages = swsusp_info.image_pages;
-	p = alloc_pagedir(nr_pages, GFP_ATOMIC, 0);
-	if (!p)
-		return -ENOMEM;
-	error = load_image_metadata(p, &handle);
+		error = swap_read_page(&handle, header);
 	if (!error) {
-		mark_unsafe_pages(p);
-		pblist = alloc_pagedir(nr_pages, GFP_ATOMIC, 1);
-		if (pblist)
-			copy_page_backup_list(pblist, p);
-		free_pagedir(p);
-		if (!pblist)
-			error = -ENOMEM;
-
-		/* Allocate memory for the image and read the data from swap */
-		if (!error)
-			error = alloc_data_pages(pblist, GFP_ATOMIC, 1);
-		if (!error) {
-			release_eaten_pages();
-			error = load_image_data(pblist, &handle, nr_pages);
-		}
-		if (!error)
-			*pblist_ptr = pblist;
+		nr_pages = header->image_pages;
+		error = load_image(&handle, &snapshot, nr_pages);
 	}
-	release_swap_map_reader(&handle);
+	release_swap_reader(&handle);
 
 	blkdev_put(resume_bdev);
 