Diffstat (limited to 'kernel')
-rw-r--r--  kernel/power/power.h    |   3
-rw-r--r--  kernel/power/snapshot.c | 602
-rw-r--r--  kernel/power/swsusp.c   |   5
3 files changed, 540 insertions(+), 70 deletions(-)
diff --git a/kernel/power/power.h b/kernel/power/power.h
index e18ba207e784..6e9e2acc34f8 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -109,9 +109,10 @@ struct snapshot_handle {
  */
 #define data_of(handle)	((handle).buffer + (handle).buf_offset)
 
+extern unsigned int snapshot_additional_pages(struct zone *zone);
 extern int snapshot_read_next(struct snapshot_handle *handle, size_t count);
 extern int snapshot_write_next(struct snapshot_handle *handle, size_t count);
-int snapshot_image_loaded(struct snapshot_handle *handle);
+extern int snapshot_image_loaded(struct snapshot_handle *handle);
 
 #define SNAPSHOT_IOC_MAGIC	'3'
 #define SNAPSHOT_FREEZE		_IO(SNAPSHOT_IOC_MAGIC, 1)
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index d0d691f976d8..852e0df41719 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -211,6 +211,467 @@ static inline void free_image_page(void *addr, int clear_nosave_free)
 	free_page((unsigned long)addr);
 }
 
+/* struct linked_page is used to build chains of pages */
+
+#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))
+
+struct linked_page {
+	struct linked_page *next;
+	char data[LINKED_PAGE_DATA_SIZE];
+} __attribute__((packed));
+
+static inline void
+free_list_of_pages(struct linked_page *list, int clear_page_nosave)
+{
+	while (list) {
+		struct linked_page *lp = list->next;
+
+		free_image_page(list, clear_page_nosave);
+		list = lp;
+	}
+}
+
+/**
+ * struct chain_allocator is used for allocating small objects out of
+ * a linked list of pages called 'the chain'.
+ *
+ * The chain grows each time there is no room for a new object in
+ * the current page.  The allocated objects cannot be freed individually.
+ * It is only possible to free them all at once, by freeing the entire
+ * chain.
+ *
+ * NOTE: The chain allocator may be inefficient if the allocated objects
+ * are not much smaller than PAGE_SIZE.
+ */
+
+struct chain_allocator {
+	struct linked_page *chain;	/* the chain */
+	unsigned int used_space;	/* total size of objects allocated out
+					 * of the current page
+					 */
+	gfp_t gfp_mask;		/* mask for allocating pages */
+	int safe_needed;	/* if set, only "safe" pages are allocated */
+};
+
+static void
+chain_init(struct chain_allocator *ca, gfp_t gfp_mask, int safe_needed)
+{
+	ca->chain = NULL;
+	ca->used_space = LINKED_PAGE_DATA_SIZE;
+	ca->gfp_mask = gfp_mask;
+	ca->safe_needed = safe_needed;
+}
+
+static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
+{
+	void *ret;
+
+	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
+		struct linked_page *lp;
+
+		lp = alloc_image_page(ca->gfp_mask, ca->safe_needed);
+		if (!lp)
+			return NULL;
+
+		lp->next = ca->chain;
+		ca->chain = lp;
+		ca->used_space = 0;
+	}
+	ret = ca->chain->data + ca->used_space;
+	ca->used_space += size;
+	return ret;
+}
+
+static void chain_free(struct chain_allocator *ca, int clear_page_nosave)
+{
+	free_list_of_pages(ca->chain, clear_page_nosave);
+	memset(ca, 0, sizeof(struct chain_allocator));
+}
+
+/**
+ * Data types related to memory bitmaps.
+ *
+ * A memory bitmap is a structure consisting of many linked lists of
+ * objects.  The main list's elements are of type struct zone_bitmap
+ * and each of them corresponds to one zone.  For each zone bitmap
+ * object there is a list of objects of type struct bm_block that
+ * represent the blocks of bit chunks in which information is
+ * stored.
+ *
+ * struct memory_bitmap contains a pointer to the main list of zone
+ * bitmap objects, a struct bm_position used for browsing the bitmap,
+ * and a pointer to the list of pages used for allocating all of the
+ * zone bitmap objects and bitmap block objects.
+ *
+ * NOTE: It has to be possible to lay out the bitmap in memory
+ * using only allocations of order 0.  Additionally, the bitmap is
+ * designed to work with an arbitrary number of zones (this is over the
+ * top for now, but let's avoid making unnecessary assumptions ;-).
+ *
+ * struct zone_bitmap contains a pointer to a list of bitmap block
+ * objects and a pointer to the bitmap block object that has been
+ * most recently used for setting bits.  Additionally, it contains the
+ * pfns that correspond to the start and end of the represented zone.
+ *
+ * struct bm_block contains a pointer to the memory page in which
+ * information is stored (in the form of a block of bit chunks
+ * of type unsigned long each).  It also contains the pfns that
+ * correspond to the start and end of the represented memory area and
+ * the number of bit chunks in the block.
+ *
+ * NOTE: Memory bitmaps are used for two types of operations only:
+ * "set a bit" and "find the next bit set".  Moreover, the searching
+ * is always carried out after all of the "set a bit" operations
+ * on the given bitmap.
+ */
+
+#define BM_END_OF_MAP	(~0UL)
+
+#define BM_CHUNKS_PER_BLOCK	(PAGE_SIZE / sizeof(long))
+#define BM_BITS_PER_CHUNK	(sizeof(long) << 3)
+#define BM_BITS_PER_BLOCK	(PAGE_SIZE << 3)
+
+struct bm_block {
+	struct bm_block *next;		/* next element of the list */
+	unsigned long start_pfn;	/* pfn represented by the first bit */
+	unsigned long end_pfn;	/* pfn represented by the last bit plus 1 */
+	unsigned int size;	/* number of bit chunks */
+	unsigned long *data;	/* chunks of bits representing pages */
+};
+
+struct zone_bitmap {
+	struct zone_bitmap *next;	/* next element of the list */
+	unsigned long start_pfn;	/* minimal pfn in this zone */
+	unsigned long end_pfn;		/* maximal pfn in this zone plus 1 */
+	struct bm_block *bm_blocks;	/* list of bitmap blocks */
+	struct bm_block *cur_block;	/* recently used bitmap block */
+};
+
+/* struct bm_position is used for browsing memory bitmaps */
+
+struct bm_position {
+	struct zone_bitmap *zone_bm;
+	struct bm_block *block;
+	int chunk;
+	int bit;
+};
+
+struct memory_bitmap {
+	struct zone_bitmap *zone_bm_list;	/* list of zone bitmaps */
+	struct linked_page *p_list;	/* list of pages used to store zone
+					 * bitmap objects and bitmap block
+					 * objects
+					 */
+	struct bm_position cur;	/* most recently used bit position */
+};
+
+/* Functions that operate on memory bitmaps */
+
+static inline void memory_bm_reset_chunk(struct memory_bitmap *bm)
+{
+	bm->cur.chunk = 0;
+	bm->cur.bit = -1;
+}
+
+static void memory_bm_position_reset(struct memory_bitmap *bm)
+{
+	struct zone_bitmap *zone_bm;
+
+	zone_bm = bm->zone_bm_list;
+	bm->cur.zone_bm = zone_bm;
+	bm->cur.block = zone_bm->bm_blocks;
+	memory_bm_reset_chunk(bm);
+}
+
+static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
+
+/**
+ * create_bm_block_list - create a list of block bitmap objects
+ */
+
+static inline struct bm_block *
+create_bm_block_list(unsigned int nr_blocks, struct chain_allocator *ca)
+{
+	struct bm_block *bblist = NULL;
+
+	while (nr_blocks-- > 0) {
+		struct bm_block *bb;
+
+		bb = chain_alloc(ca, sizeof(struct bm_block));
+		if (!bb)
+			return NULL;
+
+		bb->next = bblist;
+		bblist = bb;
+	}
+	return bblist;
+}
+
+/**
+ * create_zone_bm_list - create a list of zone bitmap objects
+ */
+
+static inline struct zone_bitmap *
+create_zone_bm_list(unsigned int nr_zones, struct chain_allocator *ca)
+{
+	struct zone_bitmap *zbmlist = NULL;
+
+	while (nr_zones-- > 0) {
+		struct zone_bitmap *zbm;
+
+		zbm = chain_alloc(ca, sizeof(struct zone_bitmap));
+		if (!zbm)
+			return NULL;
+
+		zbm->next = zbmlist;
+		zbmlist = zbm;
+	}
+	return zbmlist;
+}
+
+/**
+ * memory_bm_create - allocate memory for a memory bitmap
+ */
+
+static int
+memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
+{
+	struct chain_allocator ca;
+	struct zone *zone;
+	struct zone_bitmap *zone_bm;
+	struct bm_block *bb;
+	unsigned int nr;
+
+	chain_init(&ca, gfp_mask, safe_needed);
+
+	/* Compute the number of zones */
+	nr = 0;
+	for_each_zone (zone)
+		if (populated_zone(zone) && !is_highmem(zone))
+			nr++;
+
+	/* Allocate the list of zones bitmap objects */
+	zone_bm = create_zone_bm_list(nr, &ca);
+	bm->zone_bm_list = zone_bm;
+	if (!zone_bm) {
+		chain_free(&ca, PG_UNSAFE_CLEAR);
+		return -ENOMEM;
+	}
+
+	/* Initialize the zone bitmap objects */
+	for_each_zone (zone) {
+		unsigned long pfn;
+
+		if (!populated_zone(zone) || is_highmem(zone))
+			continue;
+
+		zone_bm->start_pfn = zone->zone_start_pfn;
+		zone_bm->end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+		/* Allocate the list of bitmap block objects */
+		nr = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
+		bb = create_bm_block_list(nr, &ca);
+		zone_bm->bm_blocks = bb;
+		zone_bm->cur_block = bb;
+		if (!bb)
+			goto Free;
+
+		nr = zone->spanned_pages;
+		pfn = zone->zone_start_pfn;
+		/* Initialize the bitmap block objects */
+		while (bb) {
+			unsigned long *ptr;
+
+			ptr = alloc_image_page(gfp_mask, safe_needed);
+			bb->data = ptr;
+			if (!ptr)
+				goto Free;
+
+			bb->start_pfn = pfn;
+			if (nr >= BM_BITS_PER_BLOCK) {
+				pfn += BM_BITS_PER_BLOCK;
+				bb->size = BM_CHUNKS_PER_BLOCK;
+				nr -= BM_BITS_PER_BLOCK;
+			} else {
+				/* This is executed only once in the loop */
+				pfn += nr;
+				bb->size = DIV_ROUND_UP(nr, BM_BITS_PER_CHUNK);
+			}
+			bb->end_pfn = pfn;
+			bb = bb->next;
+		}
+		zone_bm = zone_bm->next;
+	}
+	bm->p_list = ca.chain;
+	memory_bm_position_reset(bm);
+	return 0;
+
+Free:
+	bm->p_list = ca.chain;
+	memory_bm_free(bm, PG_UNSAFE_CLEAR);
+	return -ENOMEM;
+}
+
+/**
+ * memory_bm_free - free memory occupied by the memory bitmap @bm
+ */
+
+static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
+{
+	struct zone_bitmap *zone_bm;
+
+	/* Free the list of bit blocks for each zone_bitmap object */
+	zone_bm = bm->zone_bm_list;
+	while (zone_bm) {
+		struct bm_block *bb;
+
+		bb = zone_bm->bm_blocks;
+		while (bb) {
+			if (bb->data)
+				free_image_page(bb->data, clear_nosave_free);
+			bb = bb->next;
+		}
+		zone_bm = zone_bm->next;
+	}
+	free_list_of_pages(bm->p_list, clear_nosave_free);
+	bm->zone_bm_list = NULL;
+}
+
+/**
+ * memory_bm_set_bit - set the bit in the bitmap @bm that corresponds
+ * to the given pfn.  The cur_zone_bm member of @bm and the cur_block member
+ * of @bm->cur_zone_bm are updated.
+ *
+ * If the bit cannot be set, the function returns -EINVAL.
+ */
+
+static int
+memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
+{
+	struct zone_bitmap *zone_bm;
+	struct bm_block *bb;
+
+	/* Check if the pfn is from the current zone */
+	zone_bm = bm->cur.zone_bm;
+	if (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) {
+		zone_bm = bm->zone_bm_list;
+		/* We don't assume that the zones are sorted by pfns */
+		while (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) {
+			zone_bm = zone_bm->next;
+			if (unlikely(!zone_bm))
+				return -EINVAL;
+		}
+		bm->cur.zone_bm = zone_bm;
+	}
+	/* Check if the pfn corresponds to the current bitmap block */
+	bb = zone_bm->cur_block;
+	if (pfn < bb->start_pfn)
+		bb = zone_bm->bm_blocks;
+
+	while (pfn >= bb->end_pfn) {
+		bb = bb->next;
+		if (unlikely(!bb))
+			return -EINVAL;
+	}
+	zone_bm->cur_block = bb;
+	pfn -= bb->start_pfn;
+	set_bit(pfn % BM_BITS_PER_CHUNK, bb->data + pfn / BM_BITS_PER_CHUNK);
+	return 0;
+}
+
+/* Two auxiliary functions for memory_bm_next_pfn */
+
+/* Find the first set bit in the given chunk, if there is one */
+
+static inline int next_bit_in_chunk(int bit, unsigned long *chunk_p)
+{
+	bit++;
+	while (bit < BM_BITS_PER_CHUNK) {
+		if (test_bit(bit, chunk_p))
+			return bit;
+
+		bit++;
+	}
+	return -1;
+}
+
+/* Find a chunk containing some bits set in a given block of bits */
+
+static inline int next_chunk_in_block(int n, struct bm_block *bb)
+{
+	n++;
+	while (n < bb->size) {
+		if (bb->data[n])
+			return n;
+
+		n++;
+	}
+	return -1;
+}
+
+/**
+ * memory_bm_next_pfn - find the pfn that corresponds to the next set bit
+ * in the bitmap @bm.  If the pfn cannot be found, BM_END_OF_MAP is
+ * returned.
+ *
+ * It is required to run memory_bm_position_reset() before the first call to
+ * this function.
+ */
+
+static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
+{
+	struct zone_bitmap *zone_bm;
+	struct bm_block *bb;
+	int chunk;
+	int bit;
+
+	do {
+		bb = bm->cur.block;
+		do {
+			chunk = bm->cur.chunk;
+			bit = bm->cur.bit;
+			do {
+				bit = next_bit_in_chunk(bit, bb->data + chunk);
+				if (bit >= 0)
+					goto Return_pfn;
+
+				chunk = next_chunk_in_block(chunk, bb);
+				bit = -1;
+			} while (chunk >= 0);
+			bb = bb->next;
+			bm->cur.block = bb;
+			memory_bm_reset_chunk(bm);
+		} while (bb);
+		zone_bm = bm->cur.zone_bm->next;
+		if (zone_bm) {
+			bm->cur.zone_bm = zone_bm;
+			bm->cur.block = zone_bm->bm_blocks;
+			memory_bm_reset_chunk(bm);
+		}
+	} while (zone_bm);
+	memory_bm_position_reset(bm);
+	return BM_END_OF_MAP;
+
+Return_pfn:
+	bm->cur.chunk = chunk;
+	bm->cur.bit = bit;
+	return bb->start_pfn + chunk * BM_BITS_PER_CHUNK + bit;
+}
+
+/**
+ * snapshot_additional_pages - estimate the number of additional pages
+ * that will be needed for setting up the suspend image data structures
+ * for a given zone (usually the returned value exceeds the exact number)
+ */
+
+unsigned int snapshot_additional_pages(struct zone *zone)
+{
+	unsigned int res;
+
+	res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
+	res += DIV_ROUND_UP(res * sizeof(struct bm_block), PAGE_SIZE);
+	return res;
+}
+
 /**
  * pfn_is_nosave - check if given pfn is in the 'nosave' section
  */
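
The bitmap machinery above boils down to two operations: record a pfn as a (chunk, bit) pair inside a bm_block, and walk the blocks recovering pfns as start_pfn + chunk * BM_BITS_PER_CHUNK + bit. Here is a minimal user-space sketch of that arithmetic, assuming a single block; the names and the plain shift/mask bit operations are illustrative stand-ins for the kernel's set_bit()/test_bit(), not the patch's API:

/* User-space model of one bm_block's chunk/bit arithmetic. */
#include <stdio.h>

#define BITS_PER_CHUNK	(sizeof(unsigned long) * 8)
#define NCHUNKS		16		/* stands in for bb->size */
#define END_OF_MAP	(~0UL)		/* stands in for BM_END_OF_MAP */

static unsigned long data[NCHUNKS];	/* stands in for bb->data */
static unsigned long start_pfn = 0x1000; /* stands in for bb->start_pfn */

static void bm_set(unsigned long pfn)
{
	unsigned long off = pfn - start_pfn;

	data[off / BITS_PER_CHUNK] |= 1UL << (off % BITS_PER_CHUNK);
}

/* Scan chunks, then bits within a chunk, the way memory_bm_next_pfn()
 * does; static position state plays the role of struct bm_position. */
static unsigned long bm_next(void)
{
	static int chunk, bit = -1;

	for (; chunk < NCHUNKS; chunk++, bit = -1)
		for (bit++; bit < (int)BITS_PER_CHUNK; bit++)
			if (data[chunk] & (1UL << bit))
				return start_pfn +
				       chunk * BITS_PER_CHUNK + bit;
	return END_OF_MAP;
}

int main(void)
{
	unsigned long pfn;

	bm_set(0x1003);
	bm_set(0x1041);
	while ((pfn = bm_next()) != END_OF_MAP)
		printf("pfn 0x%lx\n", pfn);	/* 0x1003, then 0x1041 */
	return 0;
}
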
@@ -276,32 +737,38 @@ static inline void copy_data_page(long *dst, long *src)
 		*dst++ = *src++;
 }
 
-static void copy_data_pages(struct pbe *pblist)
+static void
+copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
 {
 	struct zone *zone;
-	unsigned long pfn, max_zone_pfn;
-	struct pbe *pbe;
+	unsigned long pfn;
 
-	pbe = pblist;
 	for_each_zone (zone) {
+		unsigned long max_zone_pfn;
+
 		if (is_highmem(zone))
 			continue;
+
 		mark_free_pages(zone);
 		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
-		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
-			struct page *page = saveable_page(pfn);
-
-			if (page) {
-				void *ptr = page_address(page);
-
-				BUG_ON(!pbe);
-				copy_data_page((void *)pbe->address, ptr);
-				pbe->orig_address = (unsigned long)ptr;
-				pbe = pbe->next;
-			}
-		}
+		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
+			if (saveable_page(pfn))
+				memory_bm_set_bit(orig_bm, pfn);
 	}
-	BUG_ON(pbe);
+	memory_bm_position_reset(orig_bm);
+	memory_bm_position_reset(copy_bm);
+	do {
+		pfn = memory_bm_next_pfn(orig_bm);
+		if (likely(pfn != BM_END_OF_MAP)) {
+			struct page *page;
+			void *src;
+
+			page = pfn_to_page(pfn);
+			src = page_address(page);
+			page = pfn_to_page(memory_bm_next_pfn(copy_bm));
+			copy_data_page(page_address(page), src);
+		}
+	} while (pfn != BM_END_OF_MAP);
 }
 
 /**
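
The rewritten copy_data_pages() depends on an invariant set up elsewhere: copy_bm has exactly as many bits set as orig_bm, so pulling one pfn from each iterator per step pairs every saveable page with a pre-allocated copy page. A hedged user-space sketch of that lockstep pattern follows; the arrays and next_pfn() are stand-ins for the bitmaps and memory_bm_next_pfn():

/* Lockstep pairing of two equally-populated pfn sequences. */
#include <stdio.h>

#define END (~0UL)

static unsigned long orig_pfns[] = { 3, 7, 20, END };	  /* saveable pages */
static unsigned long copy_pfns[] = { 100, 101, 102, END }; /* copy pages */

static unsigned long next_pfn(const unsigned long *bm, int *pos)
{
	return bm[(*pos)++];	/* models memory_bm_next_pfn() */
}

int main(void)
{
	int a = 0, b = 0;
	unsigned long pfn;

	do {
		pfn = next_pfn(orig_pfns, &a);
		if (pfn != END)	/* one source page pairs with one copy */
			printf("copy pfn %lu -> pfn %lu\n",
			       pfn, next_pfn(copy_pfns, &b));
	} while (pfn != END);
	return 0;
}
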
@@ -447,37 +914,43 @@ static int enough_free_mem(unsigned int nr_pages)
 		(nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
 }
 
-static int alloc_data_pages(struct pbe *pblist, gfp_t gfp_mask, int safe_needed)
+static int
+swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
+		unsigned int nr_pages)
 {
-	struct pbe *p;
+	int error;
 
-	for_each_pbe (p, pblist) {
-		p->address = (unsigned long)alloc_image_page(gfp_mask, safe_needed);
-		if (!p->address)
-			return -ENOMEM;
-	}
-	return 0;
-}
+	error = memory_bm_create(orig_bm, GFP_ATOMIC | __GFP_COLD, PG_ANY);
+	if (error)
+		goto Free;
 
-static struct pbe *swsusp_alloc(unsigned int nr_pages)
-{
-	struct pbe *pblist;
+	error = memory_bm_create(copy_bm, GFP_ATOMIC | __GFP_COLD, PG_ANY);
+	if (error)
+		goto Free;
 
-	pblist = alloc_pagedir(nr_pages, GFP_ATOMIC | __GFP_COLD, PG_ANY);
-	if (!pblist) {
-		printk(KERN_ERR "suspend: Allocating pagedir failed.\n");
-		return NULL;
-	}
+	while (nr_pages-- > 0) {
+		struct page *page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+		if (!page)
+			goto Free;
 
-	if (alloc_data_pages(pblist, GFP_ATOMIC | __GFP_COLD, PG_ANY)) {
-		printk(KERN_ERR "suspend: Allocating image pages failed.\n");
-		swsusp_free();
-		return NULL;
+		SetPageNosave(page);
+		SetPageNosaveFree(page);
+		memory_bm_set_bit(copy_bm, page_to_pfn(page));
 	}
+	return 0;
 
-	return pblist;
+Free:
+	swsusp_free();
+	return -ENOMEM;
 }
 
+/* Memory bitmap used for marking saveable pages */
+static struct memory_bitmap orig_bm;
+/* Memory bitmap used for marking allocated pages that will contain the copies
+ * of saveable pages
+ */
+static struct memory_bitmap copy_bm;
+
 asmlinkage int swsusp_save(void)
 {
 	unsigned int nr_pages;
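
swsusp_alloc() funnels every failure to a single Free: label and lets swsusp_free() release whatever subset of bitmaps and copy pages was already set up. A small sketch of the same goto-on-failure shape, using hypothetical malloc-backed resources rather than the kernel functions; free(NULL) being a no-op plays the role of swsusp_free() coping with partially built state:

/* goto-on-failure cleanup idiom, sketched with assumed resources. */
#include <stdlib.h>

struct image { void *bitmaps, *pages; };

static int image_alloc(struct image *img, size_t n)
{
	img->bitmaps = NULL;
	img->pages = NULL;

	img->bitmaps = malloc(n);
	if (!img->bitmaps)
		goto Free;

	img->pages = malloc(n);
	if (!img->pages)
		goto Free;

	return 0;

Free:
	free(img->bitmaps);	/* free(NULL) is harmless */
	free(img->pages);
	return -1;
}

int main(void)
{
	struct image img;
	int error = image_alloc(&img, 4096);

	if (!error) {
		free(img.bitmaps);
		free(img.pages);
	}
	return error;
}
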
@@ -498,15 +971,14 @@ asmlinkage int swsusp_save(void)
 		return -ENOMEM;
 	}
 
-	restore_pblist = swsusp_alloc(nr_pages);
-	if (!restore_pblist)
+	if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages))
 		return -ENOMEM;
 
 	/* During allocating of suspend pagedir, new cold pages may appear.
 	 * Kill them.
 	 */
 	drain_local_pages();
-	copy_data_pages(restore_pblist);
+	copy_data_pages(&copy_bm, &orig_bm);
 
 	/*
 	 * End of critical section. From now on, we can write to memory,
@@ -535,22 +1007,23 @@ static void init_header(struct swsusp_info *info)
 }
 
 /**
- * pack_orig_addresses - the .orig_address fields of the PBEs from the
- * list starting at @pbe are stored in the array @buf[] (1 page)
+ * pack_addresses - the addresses corresponding to pfns found in the
+ * bitmap @bm are stored in the array @buf[] (1 page)
  */
 
-static inline struct pbe *pack_orig_addresses(unsigned long *buf, struct pbe *pbe)
+static inline void
+pack_addresses(unsigned long *buf, struct memory_bitmap *bm)
 {
 	int j;
 
-	for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
-		buf[j] = pbe->orig_address;
-		pbe = pbe->next;
+	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
+		unsigned long pfn = memory_bm_next_pfn(bm);
+
+		if (unlikely(pfn == BM_END_OF_MAP))
+			break;
+
+		buf[j] = (unsigned long)page_address(pfn_to_page(pfn));
 	}
-	if (!pbe)
-		for (; j < PAGE_SIZE / sizeof(long); j++)
-			buf[j] = 0;
-	return pbe;
 }
 
 /**
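
Note the shift in responsibility here: pack_addresses() no longer zero-pads the tail of a short final page itself; the caller clears the buffer first (the memset() added to snapshot_read_next() below), and the packer only overwrites the leading slots. A user-space sketch of that division of labor, with an assumed iterator and an assumed direct-mapping stand-in for page_address():

/* Caller zeroes the page, packer fills the leading slots. */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE	4096UL
#define SLOTS		(PAGE_SIZE / sizeof(unsigned long))
#define END		(~0UL)

static unsigned long pfns[] = { 5, 9, END };
static int pos;

static unsigned long next_pfn(void)
{
	return pfns[pos++];	/* models memory_bm_next_pfn() */
}

int main(void)
{
	unsigned long buf[SLOTS];
	unsigned long j;

	memset(buf, 0, sizeof(buf));	/* the caller's memset() */
	for (j = 0; j < SLOTS; j++) {
		unsigned long pfn = next_pfn();

		if (pfn == END)
			break;
		/* page_address() stand-in: fake linear mapping */
		buf[j] = 0xc0000000UL + pfn * PAGE_SIZE;
	}
	printf("%lu addresses packed, tail left zero\n", j);
	return 0;
}
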
@@ -579,6 +1052,7 @@ int snapshot_read_next(struct snapshot_handle *handle, size_t count)
 {
 	if (handle->cur > nr_meta_pages + nr_copy_pages)
 		return 0;
+
 	if (!buffer) {
 		/* This makes the buffer be freed by swsusp_free() */
 		buffer = alloc_image_page(GFP_ATOMIC, PG_ANY);
@@ -588,16 +1062,17 @@ int snapshot_read_next(struct snapshot_handle *handle, size_t count)
 	if (!handle->offset) {
 		init_header((struct swsusp_info *)buffer);
 		handle->buffer = buffer;
-		handle->pbe = restore_pblist;
+		memory_bm_position_reset(&orig_bm);
+		memory_bm_position_reset(&copy_bm);
 	}
 	if (handle->prev < handle->cur) {
 		if (handle->cur <= nr_meta_pages) {
-			handle->pbe = pack_orig_addresses(buffer, handle->pbe);
-			if (!handle->pbe)
-				handle->pbe = restore_pblist;
+			memset(buffer, 0, PAGE_SIZE);
+			pack_addresses(buffer, &orig_bm);
 		} else {
-			handle->buffer = (void *)handle->pbe->address;
-			handle->pbe = handle->pbe->next;
+			unsigned long pfn = memory_bm_next_pfn(&copy_bm);
+
+			handle->buffer = page_address(pfn_to_page(pfn));
 		}
 		handle->prev = handle->cur;
 	}
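
With the bitmaps in place, snapshot_read_next() serves pages in a fixed order: the swsusp_info header first, then nr_meta_pages pages of packed addresses drawn from orig_bm, then nr_copy_pages data pages drawn from copy_bm. A toy model of that ordering, with made-up counts (the real function also tracks byte offsets so callers can read partial pages):

/* The page ordering of snapshot_read_next(), as a tiny state machine. */
#include <stdio.h>

int main(void)
{
	unsigned int nr_meta_pages = 2, nr_copy_pages = 3;
	unsigned int cur;

	for (cur = 0; cur <= nr_meta_pages + nr_copy_pages; cur++) {
		if (cur == 0)
			puts("page 0: swsusp_info header");
		else if (cur <= nr_meta_pages)
			printf("page %u: packed addresses (orig_bm)\n", cur);
		else
			printf("page %u: data page (copy_bm)\n", cur);
	}
	return 0;
}
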
@@ -736,12 +1211,7 @@ static inline struct pbe *unpack_orig_addresses(unsigned long *buf,
  * of "safe" which will be used later
  */
 
-struct safe_page {
-	struct safe_page *next;
-	char padding[PAGE_SIZE - sizeof(void *)];
-};
-
-static struct safe_page *safe_pages;
+static struct linked_page *safe_pages;
 
 static int prepare_image(struct snapshot_handle *handle)
 {
@@ -763,9 +1233,9 @@ static int prepare_image(struct snapshot_handle *handle)
 	if (!error && nr_pages > unsafe_pages) {
 		nr_pages -= unsafe_pages;
 		while (nr_pages--) {
-			struct safe_page *ptr;
+			struct linked_page *ptr;
 
-			ptr = (struct safe_page *)get_zeroed_page(GFP_ATOMIC);
+			ptr = (void *)get_zeroed_page(GFP_ATOMIC);
 			if (!ptr) {
 				error = -ENOMEM;
 				break;
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 17f669c83012..8ef677ea0cea 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -193,14 +193,13 @@ int swsusp_shrink_memory(void)
 	printk("Shrinking memory... ");
 	do {
 		size = 2 * count_highmem_pages();
-		size += size / 50 + count_data_pages();
-		size += (size + PBES_PER_PAGE - 1) / PBES_PER_PAGE +
-			PAGES_FOR_IO;
+		size += size / 50 + count_data_pages() + PAGES_FOR_IO;
 		tmp = size;
 		for_each_zone (zone)
 			if (!is_highmem(zone) && populated_zone(zone)) {
 				tmp -= zone->free_pages;
 				tmp += zone->lowmem_reserve[ZONE_NORMAL];
+				tmp += snapshot_additional_pages(zone);
 			}
 		if (tmp > 0) {
 			tmp = __shrink_memory(tmp);
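
The new snapshot_additional_pages() term reserves room for the bitmap data pages and bm_block descriptors that memory_bm_create() will later allocate for each zone. A worked example of the estimate, assuming a PAGE_SIZE of 4096 and a 40-byte struct bm_block (both are assumptions; the real sizes vary by architecture and config):

/* Worked snapshot_additional_pages() estimate for one zone. */
#include <stdio.h>

#define PAGE_SIZE		4096UL
#define BM_BITS_PER_BLOCK	(PAGE_SIZE << 3)	/* 32768 bits */
#define BM_BLOCK_SIZE		40UL	/* assumed sizeof(struct bm_block) */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long spanned_pages = 1UL << 20;	/* 4 GiB of 4 KiB pages */
	unsigned long res;

	res = DIV_ROUND_UP(spanned_pages, BM_BITS_PER_BLOCK);
	/* 32 pages of raw bitmap data ... */
	res += DIV_ROUND_UP(res * BM_BLOCK_SIZE, PAGE_SIZE);
	/* ... plus 1 page of bm_block descriptors */
	printf("extra pages reserved for this zone: %lu\n", res);	/* 33 */
	return 0;
}
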