author    | Hugh Dickins <hugh.dickins@tiscali.co.uk> | 2009-12-14 20:59:21 -0500
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-12-15 11:53:19 -0500
commit    | 08beca44dfb0ab008e365163df70dbd302ae1508 (patch)
tree      | 33f3ddf5460e139bd7fd37e8c08026a7d852c3da /mm
parent    | 7b6ba2c7d3baf8cd9f888e05563dcc32e368baab (diff)
ksm: stable_node point to page and back
Add a pointer to the ksm page into struct stable_node, holding a reference
to the page while the node exists. Put a pointer to the stable_node into
the ksm page's ->mapping.
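
In rough C, the two-way link looks like the sketch below. struct stable_node and the set_page_stable_node()/page_stable_node() helpers are the ones used in the hunks further down, but the exact bit encoding inside ->mapping shown here is an assumption for illustration, not taken verbatim from this patch:

/* Illustrative sketch only: forward link (stable_node -> ksm page)... */
struct stable_node {
	struct page *page;		/* ksm page, pinned by a get_page() reference */
	struct rb_node node;		/* position in the stable rbtree */
	struct hlist_head hlist;	/* rmap_items using this ksm page */
};

/* ...and back link (page->mapping -> stable_node).  Assumed encoding: the
 * pointer is tagged with the anon/ksm mapping bits, so PageKsm() still
 * recognises the page; set_page_stable_node(page, NULL) leaves a PageKsm
 * page with no stable node.
 */
static inline void set_page_stable_node(struct page *page,
					struct stable_node *stable_node)
{
	page->mapping = (void *)((unsigned long)stable_node |
				 PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}

static inline struct stable_node *page_stable_node(struct page *page)
{
	if (!PageKsm(page))
		return NULL;
	return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
}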
Then we don't need get_ksm_page() while traversing the stable tree: the
page to compare against is sure to be present and correct, even if it's no
longer visible through any of its existing rmap_items.
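
Concretely, the stable-tree walk in stable_tree_search() (see its hunk below) reduces to comparing against the node's pinned page, with no per-node lookup or refcount juggling:

	while (node) {
		int ret;

		cond_resched();
		stable_node = rb_entry(node, struct stable_node, node);

		/* stable_node->page is held by the node itself, so it cannot
		 * have been freed or reused under us. */
		ret = memcmp_pages(page, stable_node->page);

		if (ret < 0)
			node = node->rb_left;
		else if (ret > 0)
			node = node->rb_right;
		else {
			get_page(stable_node->page);	/* reference for the caller */
			return stable_node;
		}
	}
	return NULL;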
And we can handle the forked ksm page case more efficiently: no need to
memcmp our way through the tree to find its match.
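
Both short-circuits for the forked case are visible in the hunks below; in outline:

	/* stable_tree_search(): a forked ksm page already names its node. */
	stable_node = page_stable_node(page);
	if (stable_node) {			/* ksm page forked */
		get_page(page);
		return stable_node;
	}

	/* try_to_merge_with_ksm_page(): nothing to merge, page is already kpage. */
	if (page == kpage)			/* ksm page forked */
		return 0;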
Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Izik Eidus <ieidus@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/ksm.c | 99
1 file changed, 34 insertions, 65 deletions
@@ -107,10 +107,12 @@ struct ksm_scan {
 
 /**
  * struct stable_node - node of the stable rbtree
+ * @page: pointer to struct page of the ksm page
  * @node: rb node of this ksm page in the stable tree
  * @hlist: hlist head of rmap_items using this ksm page
  */
 struct stable_node {
+	struct page *page;
 	struct rb_node node;
 	struct hlist_head hlist;
 };
@@ -435,23 +437,6 @@ out:		page = NULL;
 }
 
 /*
- * get_ksm_page: checks if the page at the virtual address in rmap_item
- * is still PageKsm, in which case we can trust the content of the page,
- * and it returns the gotten page; but NULL if the page has been zapped.
- */
-static struct page *get_ksm_page(struct rmap_item *rmap_item)
-{
-	struct page *page;
-
-	page = get_mergeable_page(rmap_item);
-	if (page && !PageKsm(page)) {
-		put_page(page);
-		page = NULL;
-	}
-	return page;
-}
-
-/*
  * Removing rmap_item from stable or unstable tree.
  * This function will clean the information from the stable/unstable tree.
  */
@@ -465,6 +450,9 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
 		if (stable_node->hlist.first)
 			ksm_pages_sharing--;
 		else {
+			set_page_stable_node(stable_node->page, NULL);
+			put_page(stable_node->page);
+
 			rb_erase(&stable_node->node, &root_stable_tree);
 			free_stable_node(stable_node);
 			ksm_pages_shared--;
@@ -740,8 +728,7 @@ out:
  * try_to_merge_one_page - take two pages and merge them into one
  * @vma: the vma that holds the pte pointing to page
  * @page: the PageAnon page that we want to replace with kpage
- * @kpage: the PageKsm page (or newly allocated page which page_add_ksm_rmap
- *	   will make PageKsm) that we want to map instead of page
+ * @kpage: the PageKsm page that we want to map instead of page
  *
  * This function returns 0 if the pages were merged, -EFAULT otherwise.
  */
@@ -793,6 +780,9 @@ static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
 	struct vm_area_struct *vma;
 	int err = -EFAULT;
 
+	if (page == kpage)			/* ksm page forked */
+		return 0;
+
 	down_read(&mm->mmap_sem);
 	if (ksm_test_exit(mm))
 		goto out;
@@ -846,6 +836,9 @@ static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
 		goto up;
 
 	copy_user_highpage(kpage, page, rmap_item->address, vma);
+
+	set_page_stable_node(kpage, NULL);	/* mark it PageKsm */
+
 	err = try_to_merge_one_page(vma, page, kpage);
 up:
 	up_read(&mm->mmap_sem);
@@ -876,41 +869,31 @@ up:
  * This function returns the stable tree node of identical content if found,
  * NULL otherwise.
  */
-static struct stable_node *stable_tree_search(struct page *page,
-					      struct page **tree_pagep)
+static struct stable_node *stable_tree_search(struct page *page)
 {
 	struct rb_node *node = root_stable_tree.rb_node;
 	struct stable_node *stable_node;
 
+	stable_node = page_stable_node(page);
+	if (stable_node) {			/* ksm page forked */
+		get_page(page);
+		return stable_node;
+	}
+
 	while (node) {
-		struct hlist_node *hlist, *hnext;
-		struct rmap_item *tree_rmap_item;
-		struct page *tree_page;
 		int ret;
 
+		cond_resched();
 		stable_node = rb_entry(node, struct stable_node, node);
-		hlist_for_each_entry_safe(tree_rmap_item, hlist, hnext,
-					  &stable_node->hlist, hlist) {
-			BUG_ON(!in_stable_tree(tree_rmap_item));
-			cond_resched();
-			tree_page = get_ksm_page(tree_rmap_item);
-			if (tree_page)
-				break;
-			remove_rmap_item_from_tree(tree_rmap_item);
-		}
-		if (!hlist)
-			return NULL;
 
-		ret = memcmp_pages(page, tree_page);
+		ret = memcmp_pages(page, stable_node->page);
 
-		if (ret < 0) {
-			put_page(tree_page);
+		if (ret < 0)
 			node = node->rb_left;
-		} else if (ret > 0) {
-			put_page(tree_page);
+		else if (ret > 0)
 			node = node->rb_right;
-		} else {
-			*tree_pagep = tree_page;
+		else {
+			get_page(stable_node->page);
 			return stable_node;
 		}
 	}
@@ -932,26 +915,12 @@ static struct stable_node *stable_tree_insert(struct page *kpage)
 	struct stable_node *stable_node;
 
 	while (*new) {
-		struct hlist_node *hlist, *hnext;
-		struct rmap_item *tree_rmap_item;
-		struct page *tree_page;
 		int ret;
 
+		cond_resched();
 		stable_node = rb_entry(*new, struct stable_node, node);
-		hlist_for_each_entry_safe(tree_rmap_item, hlist, hnext,
-					  &stable_node->hlist, hlist) {
-			BUG_ON(!in_stable_tree(tree_rmap_item));
-			cond_resched();
-			tree_page = get_ksm_page(tree_rmap_item);
-			if (tree_page)
-				break;
-			remove_rmap_item_from_tree(tree_rmap_item);
-		}
-		if (!hlist)
-			return NULL;
 
-		ret = memcmp_pages(kpage, tree_page);
-		put_page(tree_page);
+		ret = memcmp_pages(kpage, stable_node->page);
 
 		parent = *new;
 		if (ret < 0)
@@ -977,6 +946,10 @@ static struct stable_node *stable_tree_insert(struct page *kpage)
 
 	INIT_HLIST_HEAD(&stable_node->hlist);
 
+	get_page(kpage);
+	stable_node->page = kpage;
+	set_page_stable_node(kpage, stable_node);
+
 	return stable_node;
 }
 
@@ -1085,14 +1058,10 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
 	remove_rmap_item_from_tree(rmap_item);
 
 	/* We first start with searching the page inside the stable tree */
-	stable_node = stable_tree_search(page, &tree_page);
+	stable_node = stable_tree_search(page);
 	if (stable_node) {
-		kpage = tree_page;
-		if (page == kpage)			/* forked */
-			err = 0;
-		else
-			err = try_to_merge_with_ksm_page(rmap_item,
-							 page, kpage);
+		kpage = stable_node->page;
+		err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
 		if (!err) {
 			/*
 			 * The page was successfully merged: