Diffstat (limited to 'kernel/power/swsusp.c')
-rw-r--r--  kernel/power/swsusp.c  139
1 file changed, 74 insertions(+), 65 deletions(-)
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 175370824f37..5da304c8f1f6 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -50,6 +50,7 @@
 #include <linux/syscalls.h>
 #include <linux/highmem.h>
 #include <linux/time.h>
+#include <linux/rbtree.h>
 
 #include "power.h"
 
@@ -74,72 +75,69 @@ static inline unsigned int count_highmem_pages(void) { return 0; }
 /**
  * The following functions are used for tracing the allocated
  * swap pages, so that they can be freed in case of an error.
- *
- * The functions operate on a linked bitmap structure defined
- * in power.h
  */
 
-void free_bitmap(struct bitmap_page *bitmap)
-{
-	struct bitmap_page *bp;
+struct swsusp_extent {
+	struct rb_node node;
+	unsigned long start;
+	unsigned long end;
+};
 
-	while (bitmap) {
-		bp = bitmap->next;
-		free_page((unsigned long)bitmap);
-		bitmap = bp;
-	}
-}
+static struct rb_root swsusp_extents = RB_ROOT;
 
-struct bitmap_page *alloc_bitmap(unsigned int nr_bits)
+static int swsusp_extents_insert(unsigned long swap_offset)
 {
-	struct bitmap_page *bitmap, *bp;
-	unsigned int n;
-
-	if (!nr_bits)
-		return NULL;
-
-	bitmap = (struct bitmap_page *)get_zeroed_page(GFP_KERNEL);
-	bp = bitmap;
-	for (n = BITMAP_PAGE_BITS; n < nr_bits; n += BITMAP_PAGE_BITS) {
-		bp->next = (struct bitmap_page *)get_zeroed_page(GFP_KERNEL);
-		bp = bp->next;
-		if (!bp) {
-			free_bitmap(bitmap);
-			return NULL;
+	struct rb_node **new = &(swsusp_extents.rb_node);
+	struct rb_node *parent = NULL;
+	struct swsusp_extent *ext;
+
+	/* Figure out where to put the new node */
+	while (*new) {
+		ext = container_of(*new, struct swsusp_extent, node);
+		parent = *new;
+		if (swap_offset < ext->start) {
+			/* Try to merge */
+			if (swap_offset == ext->start - 1) {
+				ext->start--;
+				return 0;
+			}
+			new = &((*new)->rb_left);
+		} else if (swap_offset > ext->end) {
+			/* Try to merge */
+			if (swap_offset == ext->end + 1) {
+				ext->end++;
+				return 0;
+			}
+			new = &((*new)->rb_right);
+		} else {
+			/* It already is in the tree */
+			return -EINVAL;
 		}
 	}
-	return bitmap;
-}
-
-static int bitmap_set(struct bitmap_page *bitmap, unsigned long bit)
-{
-	unsigned int n;
-
-	n = BITMAP_PAGE_BITS;
-	while (bitmap && n <= bit) {
-		n += BITMAP_PAGE_BITS;
-		bitmap = bitmap->next;
-	}
-	if (!bitmap)
-		return -EINVAL;
-	n -= BITMAP_PAGE_BITS;
-	bit -= n;
-	n = 0;
-	while (bit >= BITS_PER_CHUNK) {
-		bit -= BITS_PER_CHUNK;
-		n++;
-	}
-	bitmap->chunks[n] |= (1UL << bit);
+	/* Add the new node and rebalance the tree. */
+	ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
+	if (!ext)
+		return -ENOMEM;
+
+	ext->start = swap_offset;
+	ext->end = swap_offset;
+	rb_link_node(&ext->node, parent, new);
+	rb_insert_color(&ext->node, &swsusp_extents);
 	return 0;
 }
 
-sector_t alloc_swapdev_block(int swap, struct bitmap_page *bitmap)
+/**
+ * alloc_swapdev_block - allocate a swap page and register that it has
+ * been allocated, so that it can be freed in case of an error.
+ */
+
+sector_t alloc_swapdev_block(int swap)
 {
 	unsigned long offset;
 
 	offset = swp_offset(get_swap_page_of_type(swap));
 	if (offset) {
-		if (bitmap_set(bitmap, offset))
+		if (swsusp_extents_insert(offset))
 			swap_free(swp_entry(swap, offset));
 		else
 			return swapdev_block(swap, offset);
@@ -147,23 +145,34 @@ sector_t alloc_swapdev_block(int swap, struct bitmap_page *bitmap)
 	return 0;
 }
 
-void free_all_swap_pages(int swap, struct bitmap_page *bitmap)
+/**
+ * free_all_swap_pages - free swap pages allocated for saving image data.
+ * It also frees the extents used to register which swap entries had been
+ * allocated.
+ */
+
+void free_all_swap_pages(int swap)
 {
-	unsigned int bit, n;
-	unsigned long test;
+	struct rb_node *node;
 
-	bit = 0;
-	while (bitmap) {
-		for (n = 0; n < BITMAP_PAGE_CHUNKS; n++)
-			for (test = 1UL; test; test <<= 1) {
-				if (bitmap->chunks[n] & test)
-					swap_free(swp_entry(swap, bit));
-				bit++;
-			}
-		bitmap = bitmap->next;
+	while ((node = swsusp_extents.rb_node)) {
+		struct swsusp_extent *ext;
+		unsigned long offset;
+
+		ext = container_of(node, struct swsusp_extent, node);
+		rb_erase(node, &swsusp_extents);
+		for (offset = ext->start; offset <= ext->end; offset++)
+			swap_free(swp_entry(swap, offset));
+
+		kfree(ext);
 	}
 }
 
+int swsusp_swap_in_use(void)
+{
+	return (swsusp_extents.rb_node != NULL);
+}
+
 /**
  * swsusp_show_speed - print the time elapsed between two events represented by
  * @start and @stop
@@ -224,7 +233,7 @@ int swsusp_shrink_memory(void)
 	long size, highmem_size;
 
 	highmem_size = count_highmem_pages();
-	size = count_data_pages() + PAGES_FOR_IO;
+	size = count_data_pages() + PAGES_FOR_IO + SPARE_PAGES;
 	tmp = size;
 	size += highmem_size;
 	for_each_zone (zone)
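
The core of the change is swsusp_extents_insert(): each allocated swap offset either extends an adjacent extent by one (offset == start - 1 or offset == end + 1) or is linked into the tree as a new single-offset node, and a duplicate offset is rejected with -EINVAL. Below is a minimal user-space sketch of that insert-with-merge technique; it substitutes a plain unbalanced binary search tree for the kernel's rb_node/rb_root and rb_link_node()/rb_insert_color() rebalancing, and the names extent_insert and extent_free_all are illustrative only, not kernel API.

#include <stdio.h>
#include <stdlib.h>

struct extent {
	unsigned long start, end;	/* inclusive range of offsets */
	struct extent *left, *right;
};

static struct extent *root;

/* Mirror of swsusp_extents_insert(): descend the tree, merging the new
 * offset into an adjacent extent when possible, otherwise link a fresh
 * single-offset node at the leaf position found. */
static int extent_insert(unsigned long offset)
{
	struct extent **link = &root, *ext;

	while ((ext = *link)) {
		if (offset < ext->start) {
			if (offset == ext->start - 1) {
				ext->start--;	/* merge on the left */
				return 0;
			}
			link = &ext->left;
		} else if (offset > ext->end) {
			if (offset == ext->end + 1) {
				ext->end++;	/* merge on the right */
				return 0;
			}
			link = &ext->right;
		} else {
			return -1;	/* already tracked (kernel: -EINVAL) */
		}
	}
	ext = calloc(1, sizeof(*ext));
	if (!ext)
		return -1;		/* kernel: -ENOMEM */
	ext->start = ext->end = offset;
	*link = ext;
	return 0;
}

/* Counterpart of free_all_swap_pages(): visit every extent, release each
 * offset in it, and free the node; postorder traversal in place of
 * repeated rb_erase(). */
static void extent_free_all(struct extent *ext)
{
	if (!ext)
		return;
	extent_free_all(ext->left);
	extent_free_all(ext->right);
	printf("freeing offsets %lu..%lu\n", ext->start, ext->end);
	free(ext);
}

int main(void)
{
	unsigned long offsets[] = { 10, 11, 12, 40, 39, 11 };
	size_t i;

	for (i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++)
		printf("insert %lu -> %d\n", offsets[i], extent_insert(offsets[i]));
	extent_free_all(root);	/* prints 39..40, then 10..12 */
	root = NULL;
	return 0;
}

Note that growth never coalesces two extents that become adjacent afterwards (inserting 13 between [10,12] and [14,20] yields [10,13] and [14,20], true of the kernel code as well); since get_swap_page_of_type() hands out each offset at most once, the extents stay disjoint and free_all_swap_pages() still frees every offset exactly once.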