author     Hugh Dickins <hugh.dickins@tiscali.co.uk>        2009-12-14 20:59:24 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2009-12-15 11:53:19 -0500
commit     5ad6468801d28c4d4ac9f48ec19297817c915f6a (patch)
tree       edd8dc48693f43278d6fe1614aca2bf660d4dc10 /include/linux
parent     73848b4684e84a84cfd1555af78d41158f31e16b (diff)
ksm: let shared pages be swappable
Initial implementation for swapping out KSM's shared pages: add
page_referenced_ksm() and try_to_unmap_ksm(), which rmap.c calls when
faced with a PageKsm page.
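For reference, the mm/rmap.c dispatch this relies on is outside the include/linux
diff below; a minimal sketch of the try_to_unmap() side (assuming the existing
try_to_unmap_anon()/try_to_unmap_file() helpers) looks like this, with
page_referenced() gaining an equivalent PageKsm check before its PageAnon test:

/* Sketch of the mm/rmap.c dispatch; only the PageKsm branch is new. */
int try_to_unmap(struct page *page, enum ttu_flags flags)
{
	int ret;

	BUG_ON(!PageLocked(page));

	if (unlikely(PageKsm(page)))
		ret = try_to_unmap_ksm(page, flags);
	else if (PageAnon(page))
		ret = try_to_unmap_anon(page, flags);
	else
		ret = try_to_unmap_file(page, flags);
	if (ret != SWAP_MLOCK && !page_mapped(page))
		ret = SWAP_SUCCESS;
	return ret;
}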
Most of what's needed can be obtained from the rmap_items listed from the
stable_node of the ksm page, without discovering the actual vma: so this
patch just fakes up a struct vma for page_referenced_one() or
try_to_unmap_one(), and the next patch refines that.
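A rough sketch of how such a walk can look in mm/ksm.c follows; the on-stack
fake vma, the helper name ksm_walk_example(), and the exact field handling are
illustrative assumptions, not this patch's actual code:

/* Illustrative only: walk the stable_node's rmap_items with a faked-up vma. */
static int ksm_walk_example(struct page *page, struct mem_cgroup *memcg,
			    unsigned long *vm_flags)
{
	struct stable_node *stable_node = page_stable_node(page);
	struct rmap_item *rmap_item;
	struct hlist_node *hlist;
	unsigned int mapcount = page_mapcount(page);
	struct vm_area_struct fake_vma = { };
	int referenced = 0;

	if (!stable_node)
		return 0;

	hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
		/* low bits of rmap_item->address hold KSM flags, so mask them */
		fake_vma.vm_mm = rmap_item->mm;
		fake_vma.vm_start = rmap_item->address & PAGE_MASK;
		fake_vma.vm_end = fake_vma.vm_start + PAGE_SIZE;
		referenced += page_referenced_one(page, &fake_vma,
				rmap_item->address & PAGE_MASK,
				&mapcount, vm_flags);
		if (!mapcount)
			break;
	}
	return referenced;
}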
Add VM_NONLINEAR to ksm_madvise()'s list of exclusions: it has always been
implicit there (being only set with VM_SHARED, already excluded), but
let's make it explicit, to help justify the lack of nonlinear unmap.
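The corresponding check in ksm_madvise() (mm/ksm.c, outside this diff) is
roughly of the following form; the exact flag list is reproduced from memory
and should be read as illustrative:

	case MADV_MERGEABLE:
		/*
		 * Be somewhat over-protective: VM_NONLINEAR is only ever set
		 * together with VM_SHARED (already excluded), but listing it
		 * makes the lack of nonlinear unmap easier to justify.
		 */
		if (*vm_flags & (VM_MERGEABLE | VM_SHARED  | VM_MAYSHARE   |
				 VM_PFNMAP    | VM_IO      | VM_DONTEXPAND |
				 VM_RESERVED  | VM_HUGETLB | VM_INSERTPAGE |
				 VM_NONLINEAR | VM_MIXEDMAP))
			return 0;		/* just ignore the advice */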
Rely on the page lock to protect against concurrent modifications to that
page's node of the stable tree.
The awkward part is not swapout but swapin: do_swap_page() and
page_add_anon_rmap() now have to allow for new possibilities - perhaps a
ksm page still in swapcache, perhaps a swapcache page associated with one
location in one anon_vma now needed for another location or anon_vma.
(And the vma might even be no longer VM_MERGEABLE when that happens.)
ksm_might_need_to_copy() checks for that case, and supplies a duplicate
page when necessary, simply leaving it to a subsequent pass of ksmd to
rediscover the identity and merge them back into one ksm page.
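The swapin hook in do_swap_page() (mm/memory.c, outside this diff) then
reduces to something like this simplified sketch:

	/* Simplified: after the swapcache page has been found and locked. */
	page = ksm_might_need_to_copy(page, vma, address);
	if (!page) {
		/* a private copy was needed, but the allocation failed */
		ret = VM_FAULT_OOM;
		goto out;
	}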
Disappointingly primitive: but the alternative would have to accumulate
unswappable info about the swapped out ksm pages, limiting swappability.
Remove page_add_ksm_rmap(): page_add_anon_rmap() now has to allow for the
particular case it was handling, so just use it instead.
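With that, page_add_anon_rmap() (mm/rmap.c, outside this diff) can absorb the
old page_add_ksm_rmap() behaviour along these lines; __page_set_anon_rmap()
and __page_check_anon_rmap() are the existing rmap.c internals, and the exact
ordering here is a sketch rather than the committed code:

void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	int first = atomic_inc_and_test(&page->_mapcount);

	if (first)
		__inc_zone_page_state(page, NR_ANON_PAGES);
	if (unlikely(PageKsm(page)))
		return;		/* all that page_add_ksm_rmap() used to do */

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	if (first)
		__page_set_anon_rmap(page, vma, address);
	else
		__page_check_anon_rmap(page, vma, address);
}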
Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Izik Eidus <ieidus@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Chris Wright <chrisw@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux')
-rw-r--r--   include/linux/ksm.h    54
-rw-r--r--   include/linux/rmap.h    5
2 files changed, 53 insertions(+), 6 deletions(-)
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index ef55ce14a2ce..157d83dbaef8 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -9,10 +9,12 @@
 
 #include <linux/bitops.h>
 #include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/rmap.h>
 #include <linux/sched.h>
-#include <linux/vmstat.h>
 
 struct stable_node;
+struct mem_cgroup;
 
 #ifdef CONFIG_KSM
 int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
@@ -57,11 +59,36 @@ static inline void set_page_stable_node(struct page *page,
 					(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
 }
 
-static inline void page_add_ksm_rmap(struct page *page)
+/*
+ * When do_swap_page() first faults in from swap what used to be a KSM page,
+ * no problem, it will be assigned to this vma's anon_vma; but thereafter,
+ * it might be faulted into a different anon_vma (or perhaps to a different
+ * offset in the same anon_vma).  do_swap_page() cannot do all the locking
+ * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
+ * a copy, and leave remerging the pages to a later pass of ksmd.
+ *
+ * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
+ * but what if the vma was unmerged while the page was swapped out?
+ */
+struct page *ksm_does_need_to_copy(struct page *page,
+			struct vm_area_struct *vma, unsigned long address);
+static inline struct page *ksm_might_need_to_copy(struct page *page,
+			struct vm_area_struct *vma, unsigned long address)
 {
-	if (atomic_inc_and_test(&page->_mapcount))
-		__inc_zone_page_state(page, NR_ANON_PAGES);
+	struct anon_vma *anon_vma = page_anon_vma(page);
+
+	if (!anon_vma ||
+	    (anon_vma == vma->anon_vma &&
+	     page->index == linear_page_index(vma, address)))
+		return page;
+
+	return ksm_does_need_to_copy(page, vma, address);
 }
+
+int page_referenced_ksm(struct page *page,
+			struct mem_cgroup *memcg, unsigned long *vm_flags);
+int try_to_unmap_ksm(struct page *page, enum ttu_flags flags);
+
 #else  /* !CONFIG_KSM */
 
 static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
@@ -84,7 +111,22 @@ static inline int PageKsm(struct page *page)
 	return 0;
 }
 
-/* No stub required for page_add_ksm_rmap(page) */
+static inline struct page *ksm_might_need_to_copy(struct page *page,
+			struct vm_area_struct *vma, unsigned long address)
+{
+	return page;
+}
+
+static inline int page_referenced_ksm(struct page *page,
+			struct mem_cgroup *memcg, unsigned long *vm_flags)
+{
+	return 0;
+}
+
+static inline int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
+{
+	return 0;
+}
 #endif /* !CONFIG_KSM */
 
-#endif
+#endif /* __LINUX_KSM_H */
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 1f65af44c6d2..0b4913a4a344 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -89,6 +89,9 @@ static inline void page_dup_rmap(struct page *page)
  */
 int page_referenced(struct page *, int is_locked,
			struct mem_cgroup *cnt, unsigned long *vm_flags);
+int page_referenced_one(struct page *, struct vm_area_struct *,
+	unsigned long address, unsigned int *mapcount, unsigned long *vm_flags);
+
 enum ttu_flags {
	TTU_UNMAP = 0,			/* unmap mode */
	TTU_MIGRATION = 1,		/* migration mode */
@@ -102,6 +105,8 @@ enum ttu_flags {
 #define TTU_ACTION(x)	((x) & TTU_ACTION_MASK)
 
 int try_to_unmap(struct page *, enum ttu_flags flags);
+int try_to_unmap_one(struct page *, struct vm_area_struct *,
+			unsigned long address, enum ttu_flags flags);
 
 /*
  * Called from mm/filemap_xip.c to unmap empty zero page