path: root/mm/ksm.c
Diffstat (limited to 'mm/ksm.c')
-rw-r--r--	mm/ksm.c	234
1 file changed, 112 insertions, 122 deletions
diff --git a/mm/ksm.c b/mm/ksm.c
index fdd7d5faa90c..54fb3feebb59 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -356,8 +356,10 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
 	return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
 }
 
-static void break_cow(struct mm_struct *mm, unsigned long addr)
+static void break_cow(struct rmap_item *rmap_item)
 {
+	struct mm_struct *mm = rmap_item->mm;
+	unsigned long addr = rmap_item->address;
 	struct vm_area_struct *vma;
 
 	down_read(&mm->mmap_sem);
@@ -665,15 +667,15 @@ out:
 
 /**
  * replace_page - replace page in vma by new ksm page
- * @vma: vma that holds the pte pointing to oldpage
- * @oldpage: the page we are replacing by newpage
- * @newpage: the ksm page we replace oldpage by
+ * @vma: vma that holds the pte pointing to page
+ * @page: the page we are replacing by kpage
+ * @kpage: the ksm page we replace page by
  * @orig_pte: the original value of the pte
  *
  * Returns 0 on success, -EFAULT on failure.
  */
-static int replace_page(struct vm_area_struct *vma, struct page *oldpage,
-			struct page *newpage, pte_t orig_pte)
+static int replace_page(struct vm_area_struct *vma, struct page *page,
+			struct page *kpage, pte_t orig_pte)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	pgd_t *pgd;
@@ -684,7 +686,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *oldpage,
 	unsigned long addr;
 	int err = -EFAULT;
 
-	addr = page_address_in_vma(oldpage, vma);
+	addr = page_address_in_vma(page, vma);
 	if (addr == -EFAULT)
 		goto out;
 
@@ -706,15 +708,15 @@ static int replace_page(struct vm_area_struct *vma, struct page *oldpage,
 		goto out;
 	}
 
-	get_page(newpage);
-	page_add_ksm_rmap(newpage);
+	get_page(kpage);
+	page_add_ksm_rmap(kpage);
 
 	flush_cache_page(vma, addr, pte_pfn(*ptep));
 	ptep_clear_flush(vma, addr, ptep);
-	set_pte_at_notify(mm, addr, ptep, mk_pte(newpage, vma->vm_page_prot));
+	set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
 
-	page_remove_rmap(oldpage);
-	put_page(oldpage);
+	page_remove_rmap(page);
+	put_page(page);
 
 	pte_unmap_unlock(ptep, ptl);
 	err = 0;
@@ -724,26 +726,22 @@ out:
 
 /*
  * try_to_merge_one_page - take two pages and merge them into one
- * @vma: the vma that hold the pte pointing into oldpage
- * @oldpage: the page that we want to replace with newpage
- * @newpage: the page that we want to map instead of oldpage
- *
- * Note:
- * oldpage should be a PageAnon page, while newpage should be a PageKsm page,
- * or a newly allocated kernel page which page_add_ksm_rmap will make PageKsm.
+ * @vma: the vma that holds the pte pointing to page
+ * @page: the PageAnon page that we want to replace with kpage
+ * @kpage: the PageKsm page (or newly allocated page which page_add_ksm_rmap
+ *         will make PageKsm) that we want to map instead of page
  *
  * This function returns 0 if the pages were merged, -EFAULT otherwise.
  */
 static int try_to_merge_one_page(struct vm_area_struct *vma,
-				 struct page *oldpage,
-				 struct page *newpage)
+				 struct page *page, struct page *kpage)
 {
 	pte_t orig_pte = __pte(0);
 	int err = -EFAULT;
 
 	if (!(vma->vm_flags & VM_MERGEABLE))
 		goto out;
-	if (!PageAnon(oldpage))
+	if (!PageAnon(page))
 		goto out;
 
 	/*
@@ -753,7 +751,7 @@ static int try_to_merge_one_page(struct vm_area_struct *vma,
 	 * prefer to continue scanning and merging different pages,
 	 * then come back to this page when it is unlocked.
 	 */
-	if (!trylock_page(oldpage))
+	if (!trylock_page(page))
 		goto out;
 	/*
 	 * If this anonymous page is mapped only here, its pte may need
@@ -761,11 +759,11 @@ static int try_to_merge_one_page(struct vm_area_struct *vma,
 	 * ptes are necessarily already write-protected. But in either
 	 * case, we need to lock and check page_count is not raised.
 	 */
-	if (write_protect_page(vma, oldpage, &orig_pte) == 0 &&
-	    pages_identical(oldpage, newpage))
-		err = replace_page(vma, oldpage, newpage, orig_pte);
+	if (write_protect_page(vma, page, &orig_pte) == 0 &&
+	    pages_identical(page, kpage))
+		err = replace_page(vma, page, kpage, orig_pte);
 
-	unlock_page(oldpage);
+	unlock_page(page);
 out:
 	return err;
 }
@@ -773,26 +771,26 @@ out:
 /*
  * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
  * but no new kernel page is allocated: kpage must already be a ksm page.
+ *
+ * This function returns 0 if the pages were merged, -EFAULT otherwise.
  */
-static int try_to_merge_with_ksm_page(struct mm_struct *mm1,
-				      unsigned long addr1,
-				      struct page *page1,
-				      struct page *kpage)
+static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
+				      struct page *page, struct page *kpage)
 {
+	struct mm_struct *mm = rmap_item->mm;
 	struct vm_area_struct *vma;
 	int err = -EFAULT;
 
-	down_read(&mm1->mmap_sem);
-	if (ksm_test_exit(mm1))
+	down_read(&mm->mmap_sem);
+	if (ksm_test_exit(mm))
 		goto out;
-
-	vma = find_vma(mm1, addr1);
-	if (!vma || vma->vm_start > addr1)
+	vma = find_vma(mm, rmap_item->address);
+	if (!vma || vma->vm_start > rmap_item->address)
 		goto out;
 
-	err = try_to_merge_one_page(vma, page1, kpage);
+	err = try_to_merge_one_page(vma, page, kpage);
 out:
-	up_read(&mm1->mmap_sem);
+	up_read(&mm->mmap_sem);
 	return err;
 }
 
@@ -800,16 +798,18 @@ out:
  * try_to_merge_two_pages - take two identical pages and prepare them
  * to be merged into one page.
  *
- * This function returns 0 if we successfully mapped two identical pages
- * into one page, -EFAULT otherwise.
+ * This function returns the kpage if we successfully merged two identical
+ * pages into one ksm page, NULL otherwise.
  *
  * Note that this function allocates a new kernel page: if one of the pages
  * is already a ksm page, try_to_merge_with_ksm_page should be used.
  */
-static int try_to_merge_two_pages(struct mm_struct *mm1, unsigned long addr1,
-				  struct page *page1, struct mm_struct *mm2,
-				  unsigned long addr2, struct page *page2)
+static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
+					   struct page *page,
+					   struct rmap_item *tree_rmap_item,
+					   struct page *tree_page)
 {
+	struct mm_struct *mm = rmap_item->mm;
 	struct vm_area_struct *vma;
 	struct page *kpage;
 	int err = -EFAULT;
@@ -820,47 +820,43 @@ static int try_to_merge_two_pages(struct mm_struct *mm1, unsigned long addr1,
 	 */
 	if (ksm_max_kernel_pages &&
 	    ksm_max_kernel_pages <= ksm_pages_shared)
-		return err;
+		return NULL;
 
 	kpage = alloc_page(GFP_HIGHUSER);
 	if (!kpage)
-		return err;
-
-	down_read(&mm1->mmap_sem);
-	if (ksm_test_exit(mm1)) {
-		up_read(&mm1->mmap_sem);
-		goto out;
-	}
-	vma = find_vma(mm1, addr1);
-	if (!vma || vma->vm_start > addr1) {
-		up_read(&mm1->mmap_sem);
-		goto out;
-	}
+		return NULL;
 
-	copy_user_highpage(kpage, page1, addr1, vma);
-	err = try_to_merge_one_page(vma, page1, kpage);
-	up_read(&mm1->mmap_sem);
+	down_read(&mm->mmap_sem);
+	if (ksm_test_exit(mm))
+		goto up;
+	vma = find_vma(mm, rmap_item->address);
+	if (!vma || vma->vm_start > rmap_item->address)
+		goto up;
+
+	copy_user_highpage(kpage, page, rmap_item->address, vma);
+	err = try_to_merge_one_page(vma, page, kpage);
+up:
+	up_read(&mm->mmap_sem);
 
 	if (!err) {
-		err = try_to_merge_with_ksm_page(mm2, addr2, page2, kpage);
+		err = try_to_merge_with_ksm_page(tree_rmap_item,
+						 tree_page, kpage);
 		/*
 		 * If that fails, we have a ksm page with only one pte
 		 * pointing to it: so break it.
 		 */
 		if (err)
-			break_cow(mm1, addr1);
+			break_cow(rmap_item);
 	}
-out:
-	put_page(kpage);
-	return err;
+	if (err) {
+		put_page(kpage);
+		kpage = NULL;
+	}
+	return kpage;
 }
 
 /*
- * stable_tree_search - search page inside the stable tree
- * @page: the page that we are searching identical pages to.
- * @page2: pointer into identical page that we are holding inside the stable
- * tree that we have found.
- * @rmap_item: the reverse mapping item
+ * stable_tree_search - search for page inside the stable tree
  *
  * This function checks if there is a page inside the stable tree
  * with identical content to the page that we are scanning right now.
@@ -869,21 +865,21 @@ out:
  * NULL otherwise.
  */
 static struct rmap_item *stable_tree_search(struct page *page,
-					    struct page **page2,
-					    struct rmap_item *rmap_item)
+					    struct page **tree_pagep)
 {
 	struct rb_node *node = root_stable_tree.rb_node;
 
 	while (node) {
 		struct rmap_item *tree_rmap_item, *next_rmap_item;
+		struct page *tree_page;
 		int ret;
 
 		tree_rmap_item = rb_entry(node, struct rmap_item, node);
 		while (tree_rmap_item) {
 			BUG_ON(!in_stable_tree(tree_rmap_item));
 			cond_resched();
-			page2[0] = get_ksm_page(tree_rmap_item);
-			if (page2[0])
+			tree_page = get_ksm_page(tree_rmap_item);
+			if (tree_page)
 				break;
 			next_rmap_item = tree_rmap_item->next;
 			remove_rmap_item_from_tree(tree_rmap_item);
@@ -892,15 +888,16 @@ static struct rmap_item *stable_tree_search(struct page *page,
 		if (!tree_rmap_item)
 			return NULL;
 
-		ret = memcmp_pages(page, page2[0]);
+		ret = memcmp_pages(page, tree_page);
 
 		if (ret < 0) {
-			put_page(page2[0]);
+			put_page(tree_page);
 			node = node->rb_left;
 		} else if (ret > 0) {
-			put_page(page2[0]);
+			put_page(tree_page);
 			node = node->rb_right;
 		} else {
+			*tree_pagep = tree_page;
 			return tree_rmap_item;
 		}
 	}
@@ -912,13 +909,9 @@ static struct rmap_item *stable_tree_search(struct page *page,
  * stable_tree_insert - insert rmap_item pointing to new ksm page
  * into the stable tree.
  *
- * @page: the page that we are searching identical page to inside the stable
- * tree.
- * @rmap_item: pointer to the reverse mapping item.
- *
  * This function returns rmap_item if success, NULL otherwise.
  */
-static struct rmap_item *stable_tree_insert(struct page *page,
+static struct rmap_item *stable_tree_insert(struct page *kpage,
 					    struct rmap_item *rmap_item)
 {
 	struct rb_node **new = &root_stable_tree.rb_node;
@@ -943,7 +936,7 @@ static struct rmap_item *stable_tree_insert(struct page *page,
 		if (!tree_rmap_item)
 			return NULL;
 
-		ret = memcmp_pages(page, tree_page);
+		ret = memcmp_pages(kpage, tree_page);
 		put_page(tree_page);
 
 		parent = *new;
@@ -971,12 +964,8 @@ static struct rmap_item *stable_tree_insert(struct page *page,
 }
 
 /*
- * unstable_tree_search_insert - search and insert items into the unstable tree.
- *
- * @page: the page that we are going to search for identical page or to insert
- * into the unstable tree
- * @page2: pointer into identical page that was found inside the unstable tree
- * @rmap_item: the reverse mapping item of page
+ * unstable_tree_search_insert - search for identical page,
+ * else insert rmap_item into the unstable tree.
  *
  * This function searches for a page in the unstable tree identical to the
  * page currently being scanned; and if no identical page is found in the
@@ -988,42 +977,45 @@ static struct rmap_item *stable_tree_insert(struct page *page,
  * This function does both searching and inserting, because they share
  * the same walking algorithm in an rbtree.
  */
-static struct rmap_item *unstable_tree_search_insert(struct page *page,
-						struct page **page2,
-						struct rmap_item *rmap_item)
+static
+struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
+					      struct page *page,
+					      struct page **tree_pagep)
+
 {
 	struct rb_node **new = &root_unstable_tree.rb_node;
 	struct rb_node *parent = NULL;
 
 	while (*new) {
 		struct rmap_item *tree_rmap_item;
+		struct page *tree_page;
 		int ret;
 
 		cond_resched();
 		tree_rmap_item = rb_entry(*new, struct rmap_item, node);
-		page2[0] = get_mergeable_page(tree_rmap_item);
-		if (!page2[0])
+		tree_page = get_mergeable_page(tree_rmap_item);
+		if (!tree_page)
 			return NULL;
 
 		/*
-		 * Don't substitute an unswappable ksm page
-		 * just for one good swappable forked page.
+		 * Don't substitute a ksm page for a forked page.
 		 */
-		if (page == page2[0]) {
-			put_page(page2[0]);
+		if (page == tree_page) {
+			put_page(tree_page);
 			return NULL;
 		}
 
-		ret = memcmp_pages(page, page2[0]);
+		ret = memcmp_pages(page, tree_page);
 
 		parent = *new;
 		if (ret < 0) {
-			put_page(page2[0]);
+			put_page(tree_page);
 			new = &parent->rb_left;
 		} else if (ret > 0) {
-			put_page(page2[0]);
+			put_page(tree_page);
 			new = &parent->rb_right;
 		} else {
+			*tree_pagep = tree_page;
 			return tree_rmap_item;
 		}
 	}
@@ -1068,24 +1060,23 @@ static void stable_tree_append(struct rmap_item *rmap_item,
  */
 static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
 {
-	struct page *page2[1];
 	struct rmap_item *tree_rmap_item;
+	struct page *tree_page = NULL;
+	struct page *kpage;
 	unsigned int checksum;
 	int err;
 
 	remove_rmap_item_from_tree(rmap_item);
 
 	/* We first start with searching the page inside the stable tree */
-	tree_rmap_item = stable_tree_search(page, page2, rmap_item);
+	tree_rmap_item = stable_tree_search(page, &tree_page);
 	if (tree_rmap_item) {
-		if (page == page2[0])		/* forked */
+		kpage = tree_page;
+		if (page == kpage)		/* forked */
 			err = 0;
 		else
-			err = try_to_merge_with_ksm_page(rmap_item->mm,
-							 rmap_item->address,
-							 page, page2[0]);
-		put_page(page2[0]);
-
+			err = try_to_merge_with_ksm_page(rmap_item,
+							 page, kpage);
 		if (!err) {
 			/*
 			 * The page was successfully merged:
@@ -1093,6 +1084,7 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
 			 */
 			stable_tree_append(rmap_item, tree_rmap_item);
 		}
+		put_page(kpage);
 		return;
 	}
 
@@ -1103,7 +1095,7 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
 	 * when the mem_cgroup had reached its limit: try again now.
 	 */
 	if (PageKsm(page))
-		break_cow(rmap_item->mm, rmap_item->address);
+		break_cow(rmap_item);
 
 	/*
 	 * In case the hash value of the page was changed from the last time we
@@ -1117,18 +1109,18 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
 		return;
 	}
 
-	tree_rmap_item = unstable_tree_search_insert(page, page2, rmap_item);
+	tree_rmap_item =
+		unstable_tree_search_insert(rmap_item, page, &tree_page);
 	if (tree_rmap_item) {
-		err = try_to_merge_two_pages(rmap_item->mm,
-					     rmap_item->address, page,
-					     tree_rmap_item->mm,
-					     tree_rmap_item->address, page2[0]);
+		kpage = try_to_merge_two_pages(rmap_item, page,
+					       tree_rmap_item, tree_page);
+		put_page(tree_page);
 		/*
 		 * As soon as we merge this page, we want to remove the
 		 * rmap_item of the page we have merged with from the unstable
 		 * tree, and insert it instead as new node in the stable tree.
 		 */
-		if (!err) {
+		if (kpage) {
 			remove_rmap_item_from_tree(tree_rmap_item);
 
 			/*
@@ -1137,16 +1129,14 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
 			 * to a ksm page left outside the stable tree,
 			 * in which case we need to break_cow on both.
 			 */
-			if (stable_tree_insert(page2[0], tree_rmap_item))
+			if (stable_tree_insert(kpage, tree_rmap_item))
 				stable_tree_append(rmap_item, tree_rmap_item);
 			else {
-				break_cow(tree_rmap_item->mm,
-						tree_rmap_item->address);
-				break_cow(rmap_item->mm, rmap_item->address);
+				break_cow(tree_rmap_item);
+				break_cow(rmap_item);
 			}
+			put_page(kpage);
 		}
-
-		put_page(page2[0]);
 	}
 }
 
@@ -1308,7 +1298,7 @@ static void ksm_do_scan(unsigned int scan_npages)
 			/*
 			 * Replace now-unshared ksm page by ordinary page.
 			 */
-			break_cow(rmap_item->mm, rmap_item->address);
+			break_cow(rmap_item);
 			remove_rmap_item_from_tree(rmap_item);
 			rmap_item->oldchecksum = calc_checksum(page);
 		}
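
The common thread in this diff is that break_cow(), try_to_merge_with_ksm_page() and try_to_merge_two_pages() now take the rmap_item, which already records the mm and the address, instead of a separate mm/address pair, and try_to_merge_two_pages() reports success by returning the ksm page rather than an error code. A minimal standalone sketch of that calling-convention change follows; the struct fields and the two break_cow variants are toy stand-ins for illustration, not the kernel's definitions.

#include <stdio.h>

/* Toy stand-ins: only the fields the calling convention needs. */
struct mm_struct { int id; };

struct rmap_item {
	struct mm_struct *mm;	/* address space the page belongs to */
	unsigned long address;	/* where the page is mapped in that mm */
};

/* Old shape: every caller unpacked mm and address itself. */
static void break_cow_old(struct mm_struct *mm, unsigned long addr)
{
	printf("break COW in mm %d at %#lx\n", mm->id, addr);
}

/* New shape (as in the patch): pass the rmap_item and unpack it once. */
static void break_cow_new(struct rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;

	printf("break COW in mm %d at %#lx\n", mm->id, addr);
}

int main(void)
{
	struct mm_struct mm = { .id = 1 };
	struct rmap_item item = { .mm = &mm, .address = 0x7f0000001000UL };

	break_cow_old(item.mm, item.address);	/* pre-patch call shape */
	break_cow_new(&item);			/* post-patch call shape */
	return 0;
}

The same shift shows in try_to_merge_two_pages(): returning the kpage (or NULL) lets cmp_and_merge_page() test the result directly instead of carrying a separate err alongside the page.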