Diffstat (limited to 'mm/ksm.c')
-rw-r--r--  mm/ksm.c | 112
1 file changed, 44 insertions, 68 deletions
@@ -315,22 +315,18 @@ static void break_ksm(struct vm_area_struct *vma, unsigned long addr)
         /* Which leaves us looping there if VM_FAULT_OOM: hmmm... */
 }
 
-static void __break_cow(struct mm_struct *mm, unsigned long addr)
+static void break_cow(struct mm_struct *mm, unsigned long addr)
 {
         struct vm_area_struct *vma;
 
+        down_read(&mm->mmap_sem);
         vma = find_vma(mm, addr);
         if (!vma || vma->vm_start > addr)
-                return;
+                goto out;
         if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
-                return;
+                goto out;
         break_ksm(vma, addr);
-}
-
-static void break_cow(struct mm_struct *mm, unsigned long addr)
-{
-        down_read(&mm->mmap_sem);
-        __break_cow(mm, addr);
+out:
         up_read(&mm->mmap_sem);
 }
 
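With __break_cow() folded in, break_cow() is self-contained: it takes mmap_sem for reading itself, revalidates the VMA, and funnels every exit through the single out: label, so callers do no locking of their own. A minimal, illustrative call-site sketch, using only names that appear in the try_to_merge_two_pages hunks later in this patch:

        /* second merge step failed: mm1 alone still maps the ksm page */
        if (err)
                break_cow(mm1, addr1);  /* break_cow() handles mmap_sem internally */
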
@@ -439,17 +435,6 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
         cond_resched();         /* we're called from many long loops */
 }
 
-static void remove_all_slot_rmap_items(struct mm_slot *mm_slot)
-{
-        struct rmap_item *rmap_item, *node;
-
-        list_for_each_entry_safe(rmap_item, node, &mm_slot->rmap_list, link) {
-                remove_rmap_item_from_tree(rmap_item);
-                list_del(&rmap_item->link);
-                free_rmap_item(rmap_item);
-        }
-}
-
 static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
                                        struct list_head *cur)
 {
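The deleted remove_all_slot_rmap_items() was just the whole-list special case of remove_trailing_rmap_items(): pass mm_slot->rmap_list.next as cur and the "trailing" portion is the entire list. The surviving helper's body is not part of this hunk; the sketch below shows the shape it presumably has, doing for each item exactly what the deleted loop did (the _sketch suffix marks it as illustrative, not code taken from this patch):

static void remove_trailing_rmap_items_sketch(struct mm_slot *mm_slot,
                                              struct list_head *cur)
{
        struct rmap_item *rmap_item;

        while (cur != &mm_slot->rmap_list) {
                rmap_item = list_entry(cur, struct rmap_item, link);
                cur = cur->next;                        /* advance before freeing */
                remove_rmap_item_from_tree(rmap_item);  /* drop its tree/checksum state */
                list_del(&rmap_item->link);
                free_rmap_item(rmap_item);
        }
}

Called with cur = mm_slot->rmap_list.next, the walk starts at the first item and reaps the whole list, which is why the two call sites changed below can switch over with no change in behaviour.
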
@@ -471,6 +456,11 @@ static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
  * page and upping mmap_sem. Nor does it fit with the way we skip dup'ing
  * rmap_items from parent to child at fork time (so as not to waste time
  * if exit comes before the next scan reaches it).
+ *
+ * Similarly, although we'd like to remove rmap_items (so updating counts
+ * and freeing memory) when unmerging an area, it's easier to leave that
+ * to the next pass of ksmd - consider, for example, how ksmd might be
+ * in cmp_and_merge_page on one of the rmap_items we would be removing.
  */
 static void unmerge_ksm_pages(struct vm_area_struct *vma,
                               unsigned long start, unsigned long end)
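The added comment is easier to follow next to the shape of the function it documents: unmerging only breaks COW sharing page by page and deliberately touches no rmap_items, leaving that bookkeeping to ksmd's next pass for the reason the comment gives. An illustrative sketch of that shape (the real body is not shown in this diff; the _sketch suffix marks it as an assumption):

static void unmerge_ksm_pages_sketch(struct vm_area_struct *vma,
                                     unsigned long start, unsigned long end)
{
        unsigned long addr;

        /* break COW on every page in the range; rmap_items are left alone */
        for (addr = start; addr < end; addr += PAGE_SIZE)
                break_ksm(vma, addr);
}
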
@@ -495,7 +485,7 @@ static void unmerge_and_remove_all_rmap_items(void)
                                 continue;
                         unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end);
                 }
-                remove_all_slot_rmap_items(mm_slot);
+                remove_trailing_rmap_items(mm_slot, mm_slot->rmap_list.next);
                 up_read(&mm->mmap_sem);
         }
 
@@ -533,7 +523,7 @@ static void remove_mm_from_lists(struct mm_struct *mm)
         list_del(&mm_slot->mm_list);
         spin_unlock(&ksm_mmlist_lock);
 
-        remove_all_slot_rmap_items(mm_slot);
+        remove_trailing_rmap_items(mm_slot, mm_slot->rmap_list.next);
         free_mm_slot(mm_slot);
         clear_bit(MMF_VM_MERGEABLE, &mm->flags);
 }
@@ -740,6 +730,29 @@ out:
 }
 
 /*
+ * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
+ * but no new kernel page is allocated: kpage must already be a ksm page.
+ */
+static int try_to_merge_with_ksm_page(struct mm_struct *mm1,
+                                      unsigned long addr1,
+                                      struct page *page1,
+                                      struct page *kpage)
+{
+        struct vm_area_struct *vma;
+        int err = -EFAULT;
+
+        down_read(&mm1->mmap_sem);
+        vma = find_vma(mm1, addr1);
+        if (!vma || vma->vm_start > addr1)
+                goto out;
+
+        err = try_to_merge_one_page(vma, page1, kpage);
+out:
+        up_read(&mm1->mmap_sem);
+        return err;
+}
+
+/*
  * try_to_merge_two_pages - take two identical pages and prepare them
  * to be merged into one page.
  *
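Moving try_to_merge_with_ksm_page() above try_to_merge_two_pages() lets the latter call it for the second mm (see the following hunks) instead of open-coding the same find_vma()/try_to_merge_one_page() sequence. It is also the natural entry point once the scanner has found a matching ksm page in the stable tree. The fragment below is an illustrative call, not code from this patch; rmap_item->mm and rmap_item->address are assumed field names for the candidate page's mapping:

        /* kpage is already a ksm page, e.g. one found in the stable tree */
        err = try_to_merge_with_ksm_page(rmap_item->mm, rmap_item->address,
                                         page, kpage);
        if (!err) {
                /* page now shares kpage; link rmap_item into the stable tree */
        }
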
@@ -772,9 +785,8 @@ static int try_to_merge_two_pages(struct mm_struct *mm1, unsigned long addr1,
         down_read(&mm1->mmap_sem);
         vma = find_vma(mm1, addr1);
         if (!vma || vma->vm_start > addr1) {
-                put_page(kpage);
                 up_read(&mm1->mmap_sem);
-                return err;
+                goto out;
         }
 
         copy_user_highpage(kpage, page1, addr1, vma);
@@ -782,56 +794,20 @@ static int try_to_merge_two_pages(struct mm_struct *mm1, unsigned long addr1,
         up_read(&mm1->mmap_sem);
 
         if (!err) {
-                down_read(&mm2->mmap_sem);
-                vma = find_vma(mm2, addr2);
-                if (!vma || vma->vm_start > addr2) {
-                        put_page(kpage);
-                        up_read(&mm2->mmap_sem);
-                        break_cow(mm1, addr1);
-                        return -EFAULT;
-                }
-
-                err = try_to_merge_one_page(vma, page2, kpage);
-                up_read(&mm2->mmap_sem);
-
+                err = try_to_merge_with_ksm_page(mm2, addr2, page2, kpage);
                 /*
-                 * If the second try_to_merge_one_page failed, we have a
-                 * ksm page with just one pte pointing to it, so break it.
+                 * If that fails, we have a ksm page with only one pte
+                 * pointing to it: so break it.
                  */
                 if (err)
                         break_cow(mm1, addr1);
         }
-
+out:
         put_page(kpage);
         return err;
 }
 
 /*
- * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
- * but no new kernel page is allocated: kpage must already be a ksm page.
- */
-static int try_to_merge_with_ksm_page(struct mm_struct *mm1,
-                                      unsigned long addr1,
-                                      struct page *page1,
-                                      struct page *kpage)
-{
-        struct vm_area_struct *vma;
-        int err = -EFAULT;
-
-        down_read(&mm1->mmap_sem);
-        vma = find_vma(mm1, addr1);
-        if (!vma || vma->vm_start > addr1) {
-                up_read(&mm1->mmap_sem);
-                return err;
-        }
-
-        err = try_to_merge_one_page(vma, page1, kpage);
-        up_read(&mm1->mmap_sem);
-
-        return err;
-}
-
-/*
  * stable_tree_search - search page inside the stable tree
  * @page: the page that we are searching identical pages to.
  * @page2: pointer into identical page that we are holding inside the stable
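Because the change to try_to_merge_two_pages() is split across the two hunks above, it helps to read the resulting tail of the function in one piece. The reconstruction below is assembled only from the new (right-hand) side of those hunks; the one line that falls between them is not shown in this diff and is summarised by a comment rather than guessed:

        down_read(&mm1->mmap_sem);
        vma = find_vma(mm1, addr1);
        if (!vma || vma->vm_start > addr1) {
                up_read(&mm1->mmap_sem);
                goto out;
        }

        copy_user_highpage(kpage, page1, addr1, vma);
        /* ... merge page1 of mm1 into kpage (line not shown in this diff) ... */
        up_read(&mm1->mmap_sem);

        if (!err) {
                err = try_to_merge_with_ksm_page(mm2, addr2, page2, kpage);
                /*
                 * If that fails, we have a ksm page with only one pte
                 * pointing to it: so break it.
                 */
                if (err)
                        break_cow(mm1, addr1);
        }
out:
        put_page(kpage);        /* every path through this tail ends here */
        return err;
}

The old code had three separate put_page(kpage) calls scattered over the error paths; funnelling them all through out: is what makes the earlier removal of put_page() in the find_vma() failure branch safe.
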
@@ -1033,10 +1009,10 @@ static void stable_tree_append(struct rmap_item *rmap_item,
 }
 
 /*
- * cmp_and_merge_page - take a page computes its hash value and check if there
- * is similar hash value to different page,
- * in case we find that there is similar hash to different page we call to
- * try_to_merge_two_pages().
+ * cmp_and_merge_page - first see if page can be merged into the stable tree;
+ * if not, compare checksum to previous and if it's the same, see if page can
+ * be inserted into the unstable tree, or merged with a page already there and
+ * both transferred to the stable tree.
  *
  * @page: the page that we are searching identical page to.
  * @rmap_item: the reverse mapping into the virtual address of this page