author     | Rafael J. Wysocki <rjw@sisk.pl>        | 2005-10-30 17:59:58 -0500
committer  | Linus Torvalds <torvalds@g5.osdl.org>  | 2005-10-30 20:37:14 -0500
commit     | 2c1b4a5ca48831595979a850f40ced8e7da026f8 (patch)
tree       | 06fe8a400df8c5166c7f47ca2c30a584473f1170 /arch/x86_64
parent     | a0f496517f3e28d651d0cbbcf2d4fb701ed6957e (diff)
[PATCH] swsusp: rework memory freeing on resume
This patch makes swsusp use the PG_nosave and PG_nosave_free flags to mark
pages that should be freed if an error occurs during resume.

This allows us to simplify the code and to use swsusp_free() in all of
swsusp's resume error paths, which makes them actually work.
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/x86_64')

 -rw-r--r--  arch/x86_64/kernel/suspend.c | 84
 1 file changed, 19 insertions(+), 65 deletions(-)
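
For orientation before the diff: the patch replaces a hand-rolled, daisy-chained
page allocator with get_safe_page(), whose pages are tracked via the PG_nosave
and PG_nosave_free flags, so the arch code only propagates -ENOMEM and the
generic resume code can reclaim everything with a single swsusp_free() call.
A minimal sketch of that convention follows; the function names here are
hypothetical stand-ins, and only get_safe_page() and swsusp_free() are
interfaces actually named by the patch and its changelog.

	/* Hypothetical caller pair illustrating the error convention. */
	static int alloc_one_table(pmd_t **pmd)
	{
		/* get_safe_page() returns a page already flagged for
		 * cleanup, so failure needs no local unwinding here. */
		*pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
		return *pmd ? 0 : -ENOMEM;
	}

	int resume_prepare(void)
	{
		pmd_t *pmd;
		int error = alloc_one_table(&pmd);

		if (error)
			swsusp_free();	/* reclaims every flagged page */
		return error;
	}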
diff --git a/arch/x86_64/kernel/suspend.c b/arch/x86_64/kernel/suspend.c
index 02516823f514..fd2bef780882 100644
--- a/arch/x86_64/kernel/suspend.c
+++ b/arch/x86_64/kernel/suspend.c
@@ -147,57 +147,7 @@ extern int restore_image(void);
 
 pgd_t *temp_level4_pgt;
 
-static void **pages;
-
-static inline void *__add_page(void)
-{
-	void **c;
-
-	c = (void **)get_usable_page(GFP_ATOMIC);
-	if (c) {
-		*c = pages;
-		pages = c;
-	}
-	return c;
-}
-
-static inline void *__next_page(void)
-{
-	void **c;
-
-	c = pages;
-	if (c) {
-		pages = *c;
-		*c = NULL;
-	}
-	return c;
-}
-
-/*
- * Try to allocate as many usable pages as needed and daisy chain them.
- * If one allocation fails, free the pages allocated so far
- */
-static int alloc_usable_pages(unsigned long n)
-{
-	void *p;
-
-	pages = NULL;
-	do
-		if (!__add_page())
-			break;
-	while (--n);
-	if (n) {
-		p = __next_page();
-		while (p) {
-			free_page((unsigned long)p);
-			p = __next_page();
-		}
-		return -ENOMEM;
-	}
-	return 0;
-}
-
-static void res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
+static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
 {
 	long i, j;
 
@@ -211,7 +161,9 @@ static void res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long e
 		if (paddr >= end)
 			break;
 
-		pmd = (pmd_t *)__next_page();
+		pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
+		if (!pmd)
+			return -ENOMEM;
 		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
 		for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
 			unsigned long pe;
@@ -223,13 +175,17 @@ static void res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long e
 			set_pmd(pmd, __pmd(pe));
 		}
 	}
+	return 0;
 }
 
-static void set_up_temporary_mappings(void)
+static int set_up_temporary_mappings(void)
 {
 	unsigned long start, end, next;
+	int error;
 
-	temp_level4_pgt = (pgd_t *)__next_page();
+	temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC);
+	if (!temp_level4_pgt)
+		return -ENOMEM;
 
 	/* It is safe to reuse the original kernel mapping */
 	set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
@@ -240,29 +196,27 @@ static void set_up_temporary_mappings(void)
 	end = (unsigned long)pfn_to_kaddr(end_pfn);
 
 	for (; start < end; start = next) {
-		pud_t *pud = (pud_t *)__next_page();
+		pud_t *pud = (pud_t *)get_safe_page(GFP_ATOMIC);
+		if (!pud)
+			return -ENOMEM;
 		next = start + PGDIR_SIZE;
 		if (next > end)
 			next = end;
-		res_phys_pud_init(pud, __pa(start), __pa(next));
+		if ((error = res_phys_pud_init(pud, __pa(start), __pa(next))))
+			return error;
 		set_pgd(temp_level4_pgt + pgd_index(start),
 			mk_kernel_pgd(__pa(pud)));
 	}
+	return 0;
 }
 
 int swsusp_arch_resume(void)
 {
-	unsigned long n;
+	int error;
 
-	n = ((end_pfn << PAGE_SHIFT) + PUD_SIZE - 1) >> PUD_SHIFT;
-	n += (n + PTRS_PER_PUD - 1) / PTRS_PER_PUD + 1;
-	pr_debug("swsusp_arch_resume(): pages needed = %lu\n", n);
-	if (alloc_usable_pages(n)) {
-		free_eaten_memory();
-		return -ENOMEM;
-	}
 	/* We have got enough memory and from now on we cannot recover */
-	set_up_temporary_mappings();
+	if ((error = set_up_temporary_mappings()))
+		return error;
 	restore_image();
 	return 0;
 }
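
A note on the design: the removed code had to pre-compute a worst-case page
count and free a partial daisy chain by hand on failure; with the page-flag
scheme, the arch code keeps no bookkeeping at all, because the flags themselves
record what was allocated. A rough sketch of how such a flag-driven sweep can
work, assuming the 2.6-era PageNosave()/PageNosaveFree() helpers from
page-flags.h (the real swsusp_free() lives in kernel/power and may differ in
detail):

	/* Hypothetical sweep: free every page flagged by get_safe_page(). */
	static void sweep_safe_pages(void)
	{
		struct zone *zone;
		unsigned long pfn;

		for_each_zone(zone)
			for (pfn = zone->zone_start_pfn;
			     pfn < zone->zone_start_pfn + zone->spanned_pages;
			     pfn++) {
				struct page *page;

				if (!pfn_valid(pfn))
					continue;
				page = pfn_to_page(pfn);
				if (PageNosave(page) && PageNosaveFree(page)) {
					ClearPageNosave(page);
					ClearPageNosaveFree(page);
					free_page((unsigned long)page_address(page));
				}
			}
	}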