Diffstat (limited to 'kernel/power/snapshot.c')

 kernel/power/snapshot.c |   89 +++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 74 insertions(+), 15 deletions(-)
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 4a6dbcefd378..41f66365f0d8 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -33,7 +33,35 @@
 
 #include "power.h"
 
+struct pbe *pagedir_nosave;
+unsigned int nr_copy_pages;
+
 #ifdef CONFIG_HIGHMEM
+unsigned int count_highmem_pages(void)
+{
+	struct zone *zone;
+	unsigned long zone_pfn;
+	unsigned int n = 0;
+
+	for_each_zone (zone)
+		if (is_highmem(zone)) {
+			mark_free_pages(zone);
+			for (zone_pfn = 0; zone_pfn < zone->spanned_pages; zone_pfn++) {
+				struct page *page;
+				unsigned long pfn = zone_pfn + zone->zone_start_pfn;
+				if (!pfn_valid(pfn))
+					continue;
+				page = pfn_to_page(pfn);
+				if (PageReserved(page))
+					continue;
+				if (PageNosaveFree(page))
+					continue;
+				n++;
+			}
+		}
+	return n;
+}
+
 struct highmem_page {
 	char *data;
 	struct page *page;
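For reference, count_highmem_pages() sizes the highmem part of the image by walking every pfn of each highmem zone and keeping only pages that are valid, not reserved, and not flagged free by mark_free_pages(). A minimal user-space sketch of that filtering loop, using a made-up page_desc structure and flag bits in place of struct page, pfn_valid(), PageReserved() and PageNosaveFree(), might look like this:

/* Illustrative only: a user-space model of the "saveable page" count. */
#include <stdio.h>

enum { PG_VALID = 1, PG_RESERVED = 2, PG_NOSAVE_FREE = 4 };

struct page_desc {
	unsigned int flags;
};

static unsigned int count_saveable(const struct page_desc *zone, unsigned long spanned)
{
	unsigned long pfn;
	unsigned int n = 0;

	for (pfn = 0; pfn < spanned; pfn++) {
		if (!(zone[pfn].flags & PG_VALID))	/* hole in the zone */
			continue;
		if (zone[pfn].flags & PG_RESERVED)	/* never snapshotted */
			continue;
		if (zone[pfn].flags & PG_NOSAVE_FREE)	/* free, nothing to copy */
			continue;
		n++;
	}
	return n;
}

int main(void)
{
	struct page_desc zone[] = {
		{ PG_VALID }, { PG_VALID | PG_RESERVED },
		{ 0 }, { PG_VALID | PG_NOSAVE_FREE }, { PG_VALID },
	};

	/* Two of the five pages are valid, unreserved and in use. */
	printf("%u saveable pages\n",
	       count_saveable(zone, sizeof(zone) / sizeof(zone[0])));
	return 0;
}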
@@ -149,17 +177,15 @@ static int saveable(struct zone *zone, unsigned long *zone_pfn)
 	BUG_ON(PageReserved(page) && PageNosave(page));
 	if (PageNosave(page))
 		return 0;
-	if (PageReserved(page) && pfn_is_nosave(pfn)) {
-		pr_debug("[nosave pfn 0x%lx]", pfn);
+	if (PageReserved(page) && pfn_is_nosave(pfn))
 		return 0;
-	}
 	if (PageNosaveFree(page))
 		return 0;
 
 	return 1;
 }
 
-static unsigned count_data_pages(void)
+unsigned int count_data_pages(void)
 {
 	struct zone *zone;
 	unsigned long zone_pfn;
@@ -244,7 +270,7 @@ static inline void fill_pb_page(struct pbe *pbpage)
  *	of memory pages allocated with alloc_pagedir()
  */
 
-void create_pbe_list(struct pbe *pblist, unsigned int nr_pages)
+static inline void create_pbe_list(struct pbe *pblist, unsigned int nr_pages)
 {
 	struct pbe *pbpage, *p;
 	unsigned int num = PBES_PER_PAGE;
@@ -261,7 +287,35 @@ void create_pbe_list(struct pbe *pblist, unsigned int nr_pages)
 			p->next = p + 1;
 		p->next = NULL;
 	}
-	pr_debug("create_pbe_list(): initialized %d PBEs\n", num);
+}
+
+/**
+ *	On resume it is necessary to trace and eventually free the unsafe
+ *	pages that have been allocated, because they are needed for I/O
+ *	(on x86-64 we likely will "eat" these pages once again while
+ *	creating the temporary page translation tables)
+ */
+
+struct eaten_page {
+	struct eaten_page *next;
+	char padding[PAGE_SIZE - sizeof(void *)];
+};
+
+static struct eaten_page *eaten_pages = NULL;
+
+void release_eaten_pages(void)
+{
+	struct eaten_page *p, *q;
+
+	p = eaten_pages;
+	while (p) {
+		q = p->next;
+		/* We don't want swsusp_free() to free this page again */
+		ClearPageNosave(virt_to_page(p));
+		free_page((unsigned long)p);
+		p = q;
+	}
+	eaten_pages = NULL;
 }
 
 /**
@@ -282,9 +336,12 @@ static inline void *alloc_image_page(gfp_t gfp_mask, int safe_needed)
 	if (safe_needed)
 		do {
 			res = (void *)get_zeroed_page(gfp_mask);
-			if (res && PageNosaveFree(virt_to_page(res)))
+			if (res && PageNosaveFree(virt_to_page(res))) {
 				/* This is for swsusp_free() */
 				SetPageNosave(virt_to_page(res));
+				((struct eaten_page *)res)->next = eaten_pages;
+				eaten_pages = res;
+			}
 		} while (res && PageNosaveFree(virt_to_page(res)));
 	else
 		res = (void *)get_zeroed_page(gfp_mask);
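The previous two hunks work as a pair: alloc_image_page() now refuses to use a page that collides with the saved image (PageNosaveFree), but instead of freeing it, which would let the allocator hand it out again, it parks the page on a singly-linked list whose node lives inside the page itself, and release_eaten_pages() later walks that list and frees everything. A rough user-space sketch of the same intrusive-list trick, with calloc()/free() standing in for get_zeroed_page()/free_page() and a made-up is_unsafe() predicate in place of PageNosaveFree(), could look like this:

/* Illustrative only: keep "unsafe" allocations parked on an intrusive list
 * so they cannot be handed out again, then release them all in one pass. */
#include <stdlib.h>

#define PAGE_SIZE 4096

struct eaten_page {
	struct eaten_page *next;
	char padding[PAGE_SIZE - sizeof(void *)];
};

static struct eaten_page *eaten_pages;

/* Stand-in for PageNosaveFree(): pretend some pages are off limits. */
static int is_unsafe(void *page)
{
	return ((unsigned long)page >> 12) & 1;	/* arbitrary toy predicate */
}

/* Keep allocating until we get a safe page; park the rejects. */
static void *alloc_safe_page(void)
{
	void *res;

	do {
		res = calloc(1, PAGE_SIZE);
		if (res && is_unsafe(res)) {
			((struct eaten_page *)res)->next = eaten_pages;
			eaten_pages = res;
		}
	} while (res && is_unsafe(res));
	return res;
}

static void release_eaten_pages(void)
{
	struct eaten_page *p = eaten_pages, *q;

	while (p) {
		q = p->next;
		free(p);
		p = q;
	}
	eaten_pages = NULL;
}

int main(void)
{
	void *page = alloc_safe_page();	/* any rejected pages stay parked */

	free(page);
	release_eaten_pages();		/* now the rejects can go too */
	return 0;
}

Storing the next pointer inside the rejected page itself means the bookkeeping costs no extra memory, which matters when the whole point of the exercise is that memory is scarce.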
@@ -332,7 +389,8 @@ struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask, int safe_needed
 	if (!pbe) { /* get_zeroed_page() failed */
 		free_pagedir(pblist);
 		pblist = NULL;
-	}
+	} else
+		create_pbe_list(pblist, nr_pages);
 	return pblist;
 }
 
@@ -370,8 +428,14 @@ void swsusp_free(void)
 
 static int enough_free_mem(unsigned int nr_pages)
 {
-	pr_debug("swsusp: available memory: %u pages\n", nr_free_pages());
-	return nr_free_pages() > (nr_pages + PAGES_FOR_IO +
+	struct zone *zone;
+	unsigned int n = 0;
+
+	for_each_zone (zone)
+		if (!is_highmem(zone))
+			n += zone->free_pages;
+	pr_debug("swsusp: available memory: %u pages\n", n);
+	return n > (nr_pages + PAGES_FOR_IO +
 		(nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
 }
 
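enough_free_mem() now sums free pages over the non-highmem zones instead of relying on nr_free_pages(), but the bound itself is unchanged: besides the nr_pages data copies, the snapshot needs PAGES_FOR_IO pages of headroom and one page of page backup entries (PBEs) per PBES_PER_PAGE image pages, rounded up. A stand-alone rendering of that arithmetic, with placeholder values for the two kernel constants, is below:

/* Illustrative only: the memory bound checked by enough_free_mem().
 * PAGES_FOR_IO and PBES_PER_PAGE are kernel constants; the values used
 * here are placeholders, not the real ones. */
#include <stdio.h>

#define PAGES_FOR_IO	1024	/* placeholder */
#define PBES_PER_PAGE	341	/* placeholder: page backup entries per page */

static unsigned int pages_needed(unsigned int nr_pages)
{
	/* image copies + I/O headroom + PBE metadata pages (ceiling division) */
	return nr_pages + PAGES_FOR_IO +
		(nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE;
}

int main(void)
{
	unsigned int nr_pages = 50000, free_lowmem = 60000;

	printf("need %u pages, have %u: %s\n", pages_needed(nr_pages),
	       free_lowmem, free_lowmem > pages_needed(nr_pages) ? "ok" : "no");
	return 0;
}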
@@ -395,7 +459,6 @@ static struct pbe *swsusp_alloc(unsigned int nr_pages)
 		printk(KERN_ERR "suspend: Allocating pagedir failed.\n");
 		return NULL;
 	}
-	create_pbe_list(pblist, nr_pages);
 
 	if (alloc_data_pages(pblist, GFP_ATOMIC | __GFP_COLD, 0)) {
 		printk(KERN_ERR "suspend: Allocating image pages failed.\n");
@@ -421,10 +484,6 @@ asmlinkage int swsusp_save(void)
 		(nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE,
 		PAGES_FOR_IO, nr_free_pages());
 
-	/* This is needed because of the fixed size of swsusp_info */
-	if (MAX_PBES < (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE)
-		return -ENOSPC;
-
 	if (!enough_free_mem(nr_pages)) {
 		printk(KERN_ERR "swsusp: Not enough free memory\n");
 		return -ENOMEM;