author     Joerg Roedel <jroedel@suse.de>                  2014-07-21 06:27:01 -0400
committer  Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2014-07-28 19:47:44 -0400
commit     9047eb629e5cd25ae3834d8c62ae02eb8c32bc17
tree       4f0fa2a919bad3ef3bfc5254344f52b2d2895919 /kernel
parent     6efde38f07690652bf0d93f5e4f1a5f496574806
PM / Hibernate: Remove the old memory-bitmap implementation
The radix tree implementation has been proved to work the same
as the old implementation. So the old implementation can now be
removed to finish the switch to the radix tree for the memory
bitmaps.
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/power/snapshot.c | 223
1 file changed, 21 insertions(+), 202 deletions(-)
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 5b71caf43d32..ab1998adb0a9 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -267,18 +267,6 @@ static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
 #define BM_BLOCK_SHIFT		(PAGE_SHIFT + 3)
 #define BM_BLOCK_MASK		((1UL << BM_BLOCK_SHIFT) - 1)
 
-struct bm_block {
-	struct list_head hook;		/* hook into a list of bitmap blocks */
-	unsigned long start_pfn;	/* pfn represented by the first bit */
-	unsigned long end_pfn;		/* pfn represented by the last bit plus 1 */
-	unsigned long *data;		/* bitmap representing pages */
-};
-
-static inline unsigned long bm_block_bits(struct bm_block *bb)
-{
-	return bb->end_pfn - bb->start_pfn;
-}
-
 /*
  * struct rtree_node is a wrapper struct to link the nodes
  * of the rtree together for easy linear iteration over
@@ -307,9 +295,6 @@ struct mem_zone_bm_rtree {
 /* strcut bm_position is used for browsing memory bitmaps */
 
 struct bm_position {
-	struct bm_block *block;
-	int bit;
-
 	struct mem_zone_bm_rtree *zone;
 	struct rtree_node *node;
 	unsigned long node_pfn;
@@ -318,7 +303,6 @@ struct bm_position {
 
 struct memory_bitmap {
 	struct list_head zones;
-	struct list_head blocks;	/* list of bitmap blocks */
 	struct linked_page *p_list;	/* list of pages used to store zone
 					 * bitmap objects and bitmap block
 					 * objects
@@ -490,9 +474,6 @@ static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
 
 static void memory_bm_position_reset(struct memory_bitmap *bm)
 {
-	bm->cur.block = list_entry(bm->blocks.next, struct bm_block, hook);
-	bm->cur.bit = 0;
-
 	bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
 				  list);
 	bm->cur.node = list_entry(bm->cur.zone->leaves.next,
@@ -503,30 +484,6 @@ static void memory_bm_position_reset(struct memory_bitmap *bm)
 
 static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
 
-/**
- *	create_bm_block_list - create a list of block bitmap objects
- *	@pages - number of pages to track
- *	@list - list to put the allocated blocks into
- *	@ca - chain allocator to be used for allocating memory
- */
-static int create_bm_block_list(unsigned long pages,
-				struct list_head *list,
-				struct chain_allocator *ca)
-{
-	unsigned int nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
-
-	while (nr_blocks-- > 0) {
-		struct bm_block *bb;
-
-		bb = chain_alloc(ca, sizeof(struct bm_block));
-		if (!bb)
-			return -ENOMEM;
-		list_add(&bb->hook, list);
-	}
-
-	return 0;
-}
-
 struct mem_extent {
 	struct list_head hook;
 	unsigned long start;
@@ -618,7 +575,6 @@ memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
 	int error;
 
 	chain_init(&ca, gfp_mask, safe_needed);
-	INIT_LIST_HEAD(&bm->blocks);
 	INIT_LIST_HEAD(&bm->zones);
 
 	error = create_mem_extents(&mem_extents, gfp_mask);
@@ -627,38 +583,13 @@
 
 	list_for_each_entry(ext, &mem_extents, hook) {
 		struct mem_zone_bm_rtree *zone;
-		struct bm_block *bb;
-		unsigned long pfn = ext->start;
-		unsigned long pages = ext->end - ext->start;
-
-		bb = list_entry(bm->blocks.prev, struct bm_block, hook);
-
-		error = create_bm_block_list(pages, bm->blocks.prev, &ca);
-		if (error)
-			goto Error;
-
-		list_for_each_entry_continue(bb, &bm->blocks, hook) {
-			bb->data = get_image_page(gfp_mask, safe_needed);
-			if (!bb->data) {
-				error = -ENOMEM;
-				goto Error;
-			}
-
-			bb->start_pfn = pfn;
-			if (pages >= BM_BITS_PER_BLOCK) {
-				pfn += BM_BITS_PER_BLOCK;
-				pages -= BM_BITS_PER_BLOCK;
-			} else {
-				/* This is executed only once in the loop */
-				pfn += pages;
-			}
-			bb->end_pfn = pfn;
-		}
 
 		zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
 					    ext->start, ext->end);
-		if (!zone)
+		if (!zone) {
+			error = -ENOMEM;
 			goto Error;
+		}
 		list_add_tail(&zone->list, &bm->zones);
 	}
 
@@ -680,11 +611,6 @@ memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
 static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
 {
 	struct mem_zone_bm_rtree *zone;
-	struct bm_block *bb;
-
-	list_for_each_entry(bb, &bm->blocks, hook)
-		if (bb->data)
-			free_image_page(bb->data, clear_nosave_free);
 
 	list_for_each_entry(zone, &bm->zones, list)
 		free_zone_bm_rtree(zone, clear_nosave_free);
@@ -692,55 +618,20 @@ static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
 	free_list_of_pages(bm->p_list, clear_nosave_free);
 
 	INIT_LIST_HEAD(&bm->zones);
-	INIT_LIST_HEAD(&bm->blocks);
 }
 
 /**
- *	memory_bm_find_bit - find the bit in the bitmap @bm that corresponds
- *	to given pfn. The cur_zone_bm member of @bm and the cur_block member
- *	of @bm->cur_zone_bm are updated.
- */
-static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
-			      void **addr, unsigned int *bit_nr)
-{
-	struct bm_block *bb;
-
-	/*
-	 * Check if the pfn corresponds to the current bitmap block and find
-	 * the block where it fits if this is not the case.
-	 */
-	bb = bm->cur.block;
-	if (pfn < bb->start_pfn)
-		list_for_each_entry_continue_reverse(bb, &bm->blocks, hook)
-			if (pfn >= bb->start_pfn)
-				break;
-
-	if (pfn >= bb->end_pfn)
-		list_for_each_entry_continue(bb, &bm->blocks, hook)
-			if (pfn >= bb->start_pfn && pfn < bb->end_pfn)
-				break;
-
-	if (&bb->hook == &bm->blocks)
-		return -EFAULT;
-
-	/* The block has been found */
-	bm->cur.block = bb;
-	pfn -= bb->start_pfn;
-	bm->cur.bit = pfn + 1;
-	*bit_nr = pfn;
-	*addr = bb->data;
-	return 0;
-}
-
-/*
- *	memory_rtree_find_bit - Find the bit for pfn in the memory
- *	bitmap
+ *	memory_bm_find_bit - Find the bit for pfn in the memory
+ *	bitmap
  *
- *	Walks the radix tree to find the page which contains the bit for
+ *	Find the bit in the bitmap @bm that corresponds to given pfn.
+ *	The cur.zone, cur.block and cur.node_pfn member of @bm are
+ *	updated.
+ *	It walks the radix tree to find the page which contains the bit for
  *	pfn and returns the bit position in **addr and *bit_nr.
  */
-static int memory_rtree_find_bit(struct memory_bitmap *bm, unsigned long pfn,
+static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
 			      void **addr, unsigned int *bit_nr)
 {
 	struct mem_zone_bm_rtree *curr, *zone;
 	struct rtree_node *node;
@@ -808,10 +699,6 @@ static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
 	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
 	BUG_ON(error);
 	set_bit(bit, addr);
-
-	error = memory_rtree_find_bit(bm, pfn, &addr, &bit);
-	BUG_ON(error);
-	set_bit(bit, addr);
 }
 
 static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
@@ -823,12 +710,6 @@ static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
 	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
 	if (!error)
 		set_bit(bit, addr);
-	else
-		return error;
-
-	error = memory_rtree_find_bit(bm, pfn, &addr, &bit);
-	if (!error)
-		set_bit(bit, addr);
 
 	return error;
 }
@@ -842,10 +723,6 @@ static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
 	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
 	BUG_ON(error);
 	clear_bit(bit, addr);
-
-	error = memory_rtree_find_bit(bm, pfn, &addr, &bit);
-	BUG_ON(error);
-	clear_bit(bit, addr);
 }
 
 static void memory_bm_clear_current(struct memory_bitmap *bm)
@@ -854,82 +731,25 @@ static void memory_bm_clear_current(struct memory_bitmap *bm)
 
 	bit = max(bm->cur.node_bit - 1, 0);
 	clear_bit(bit, bm->cur.node->data);
-
-	bit = max(bm->cur.bit - 1, 0);
-	clear_bit(bit, bm->cur.block->data);
 }
 
 static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
 {
 	void *addr;
 	unsigned int bit;
-	int error, error2;
-	int v;
+	int error;
 
 	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
 	BUG_ON(error);
-	v = test_bit(bit, addr);
-
-	error2 = memory_rtree_find_bit(bm, pfn, &addr, &bit);
-	BUG_ON(error2);
-
-	WARN_ON_ONCE(v != test_bit(bit, addr));
-
-	return v;
+	return test_bit(bit, addr);
 }
 
 static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
 {
 	void *addr;
 	unsigned int bit;
-	int present;
-
-	present = !memory_bm_find_bit(bm, pfn, &addr, &bit);
-
-	WARN_ON_ONCE(present != !memory_rtree_find_bit(bm, pfn, &addr, &bit));
 
-	return present;
-}
-
-/**
- *	memory_bm_next_pfn - find the pfn that corresponds to the next set bit
- *	in the bitmap @bm. If the pfn cannot be found, BM_END_OF_MAP is
- *	returned.
- *
- *	It is required to run memory_bm_position_reset() before the first call to
- *	this function.
- */
-
-static unsigned long memory_bm_rtree_next_pfn(struct memory_bitmap *bm);
-
-static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
-{
-	unsigned long rtree_pfn;
-	struct bm_block *bb;
-	int bit;
-
-	rtree_pfn = memory_bm_rtree_next_pfn(bm);
-
-	bb = bm->cur.block;
-	do {
-		bit = bm->cur.bit;
-		bit = find_next_bit(bb->data, bm_block_bits(bb), bit);
-		if (bit < bm_block_bits(bb))
-			goto Return_pfn;
-
-		bb = list_entry(bb->hook.next, struct bm_block, hook);
-		bm->cur.block = bb;
-		bm->cur.bit = 0;
-	} while (&bb->hook != &bm->blocks);
-
-	memory_bm_position_reset(bm);
-	WARN_ON_ONCE(rtree_pfn != BM_END_OF_MAP);
-	return BM_END_OF_MAP;
-
- Return_pfn:
-	WARN_ON_ONCE(bb->start_pfn + bit != rtree_pfn);
-	bm->cur.bit = bit + 1;
-	return bb->start_pfn + bit;
+	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
 }
 
 /*
@@ -967,14 +787,17 @@ static bool rtree_next_node(struct memory_bitmap *bm)
 	return false;
 }
 
-/*
- *	memory_bm_rtree_next_pfn - Find the next set bit
+/**
+ *	memory_bm_rtree_next_pfn - Find the next set bit in the bitmap @bm
  *
 *	Starting from the last returned position this function searches
 *	for the next set bit in the memory bitmap and returns its
 *	number. If no more bit is set BM_END_OF_MAP is returned.
+ *
+ *	It is required to run memory_bm_position_reset() before the
+ *	first call to this function.
  */
-static unsigned long memory_bm_rtree_next_pfn(struct memory_bitmap *bm)
+static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
 {
 	unsigned long bits, pfn, pages;
 	int bit;
@@ -1216,11 +1039,7 @@ void free_basic_memory_bitmaps(void)
 unsigned int snapshot_additional_pages(struct zone *zone)
 {
 	unsigned int rtree, nodes;
-	unsigned int res;
 
-	res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
-	res += DIV_ROUND_UP(res * sizeof(struct bm_block),
-			    LINKED_PAGE_DATA_SIZE);
 	rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
 	rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
 			      LINKED_PAGE_DATA_SIZE);
@@ -1229,7 +1048,7 @@ unsigned int snapshot_additional_pages(struct zone *zone)
 		rtree += nodes;
 	}
 
-	return 2 * (res + rtree);
+	return 2 * rtree;
 }
 
 #ifdef CONFIG_HIGHMEM
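
For readers unfamiliar with the data structure that this patch keeps, below is a minimal stand-alone sketch of the general idea behind a radix-tree memory bitmap: a pfn is split into per-level indices, the tree is walked down to a leaf bitmap page, and the low bits of the pfn select the bit inside that leaf. This is conceptually similar to what the retained memory_bm_find_bit() does in snapshot.c, but it is not the kernel code; all names, level counts and sizes here are made up for illustration.

/* sketch.c - illustration only; build with: cc -o sketch sketch.c */
#include <stdio.h>
#include <stdlib.h>

#define LEAF_BITS  (1UL << 15)  /* bits covered by one leaf bitmap (hypothetical) */
#define FANOUT     (1UL << 9)   /* slots per inner node (hypothetical) */

struct node {
	void *slot[FANOUT];      /* child nodes, or leaf bitmaps at the lowest level */
};

/*
 * Walk the tree from the root down to the leaf that covers @pfn,
 * allocating missing nodes on the way, and return the leaf bitmap
 * together with the bit number inside it.
 */
static unsigned long *find_bit(struct node *root, int levels,
			       unsigned long pfn, unsigned int *bit_nr)
{
	struct node *n = root;
	int lvl;

	for (lvl = levels - 1; lvl >= 0; lvl--) {
		/* index of the child slot that covers pfn at this level */
		unsigned long idx = ((pfn / LEAF_BITS) >> (lvl * 9)) & (FANOUT - 1);

		if (!n->slot[idx])
			n->slot[idx] = calloc(1, lvl ? sizeof(struct node)
						     : LEAF_BITS / 8);
		if (!n->slot[idx])
			return NULL;
		n = n->slot[idx];
	}

	*bit_nr = pfn % LEAF_BITS;   /* bit offset inside the leaf page */
	return (unsigned long *)n;
}

int main(void)
{
	struct node root = { { 0 } };
	unsigned int bit;
	unsigned long *leaf = find_bit(&root, 2, 123456789UL, &bit);

	if (leaf)                    /* set the bit for this pfn */
		leaf[bit / (8 * sizeof(long))] |= 1UL << (bit % (8 * sizeof(long)));
	printf("pfn 123456789 -> bit %u in leaf %p\n", bit, (void *)leaf);
	return 0;
}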