Diffstat (limited to 'include/linux/ksm.h')
-rw-r--r--  include/linux/ksm.h | 96
1 file changed, 82 insertions(+), 14 deletions(-)
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index a485c14ecd5d..43bdab769fc3 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -9,8 +9,12 @@
 
 #include <linux/bitops.h>
 #include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/rmap.h>
 #include <linux/sched.h>
-#include <linux/vmstat.h>
+
+struct stable_node;
+struct mem_cgroup;
 
 #ifdef CONFIG_KSM
 int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
@@ -34,46 +38,110 @@ static inline void ksm_exit(struct mm_struct *mm)
 /*
  * A KSM page is one of those write-protected "shared pages" or "merged pages"
  * which KSM maps into multiple mms, wherever identical anonymous page content
- * is found in VM_MERGEABLE vmas. It's a PageAnon page, with NULL anon_vma.
+ * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
+ * anon_vma, but to that page's node of the stable tree.
  */
 static inline int PageKsm(struct page *page)
 {
-	return ((unsigned long)page->mapping == PAGE_MAPPING_ANON);
+	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
+				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
+}
+
+static inline struct stable_node *page_stable_node(struct page *page)
+{
+	return PageKsm(page) ? page_rmapping(page) : NULL;
+}
+
+static inline void set_page_stable_node(struct page *page,
+					struct stable_node *stable_node)
+{
+	page->mapping = (void *)stable_node +
+				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
 }
 
 /*
- * But we have to avoid the checking which page_add_anon_rmap() performs.
+ * When do_swap_page() first faults in from swap what used to be a KSM page,
+ * no problem, it will be assigned to this vma's anon_vma; but thereafter,
+ * it might be faulted into a different anon_vma (or perhaps to a different
+ * offset in the same anon_vma). do_swap_page() cannot do all the locking
+ * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
+ * a copy, and leave remerging the pages to a later pass of ksmd.
+ *
+ * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
+ * but what if the vma was unmerged while the page was swapped out?
  */
-static inline void page_add_ksm_rmap(struct page *page)
+struct page *ksm_does_need_to_copy(struct page *page,
+			struct vm_area_struct *vma, unsigned long address);
+static inline struct page *ksm_might_need_to_copy(struct page *page,
+			struct vm_area_struct *vma, unsigned long address)
 {
-	if (atomic_inc_and_test(&page->_mapcount)) {
-		page->mapping = (void *) PAGE_MAPPING_ANON;
-		__inc_zone_page_state(page, NR_ANON_PAGES);
-	}
+	struct anon_vma *anon_vma = page_anon_vma(page);
+
+	if (!anon_vma ||
+	    (anon_vma == vma->anon_vma &&
+	     page->index == linear_page_index(vma, address)))
+		return page;
+
+	return ksm_does_need_to_copy(page, vma, address);
 }
+
+int page_referenced_ksm(struct page *page,
+			struct mem_cgroup *memcg, unsigned long *vm_flags);
+int try_to_unmap_ksm(struct page *page, enum ttu_flags flags);
+int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
+		struct vm_area_struct *, unsigned long, void *), void *arg);
+void ksm_migrate_page(struct page *newpage, struct page *oldpage);
+
 #else /* !CONFIG_KSM */
 
+static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+{
+	return 0;
+}
+
+static inline void ksm_exit(struct mm_struct *mm)
+{
+}
+
+static inline int PageKsm(struct page *page)
+{
+	return 0;
+}
+
+#ifdef CONFIG_MMU
 static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
 		unsigned long end, int advice, unsigned long *vm_flags)
 {
 	return 0;
 }
 
-static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+static inline struct page *ksm_might_need_to_copy(struct page *page,
+			struct vm_area_struct *vma, unsigned long address)
+{
+	return page;
+}
+
+static inline int page_referenced_ksm(struct page *page,
+			struct mem_cgroup *memcg, unsigned long *vm_flags)
 {
 	return 0;
 }
 
-static inline void ksm_exit(struct mm_struct *mm)
+static inline int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
 {
+	return 0;
 }
 
-static inline int PageKsm(struct page *page)
+static inline int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page*,
+		struct vm_area_struct *, unsigned long, void *), void *arg)
 {
 	return 0;
 }
 
-/* No stub required for page_add_ksm_rmap(page) */
+static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
+{
+}
+#endif /* CONFIG_MMU */
 #endif /* !CONFIG_KSM */
 
-#endif
+#endif /* __LINUX_KSM_H */
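A note on the PageKsm() change above: the kernel stores a tagged pointer in page->mapping, so the same word can point at an anon_vma for an ordinary anonymous page (low bit PAGE_MAPPING_ANON set) or at a stable-tree node for a KSM page (both PAGE_MAPPING_ANON and PAGE_MAPPING_KSM set). That is why the new test masks with PAGE_MAPPING_FLAGS instead of comparing against PAGE_MAPPING_ANON alone, as the old code did. The user-space sketch below models that encoding; struct fake_page, struct fake_stable_node and the MAPPING_* macros are hypothetical stand-ins for struct page, struct stable_node and the kernel's PAGE_MAPPING_* flags.

#include <assert.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's PAGE_MAPPING_ANON,
 * PAGE_MAPPING_KSM and PAGE_MAPPING_FLAGS. */
#define MAPPING_ANON	0x1UL
#define MAPPING_KSM	0x2UL
#define MAPPING_FLAGS	(MAPPING_ANON | MAPPING_KSM)

struct fake_stable_node { long placeholder; };

struct fake_page {
	void *mapping;		/* tagged pointer, as in struct page */
};

/* Mirrors set_page_stable_node(): the node is word-aligned, so its
 * two low address bits are free to carry the ANON and KSM tags. */
static void set_stable_node(struct fake_page *page,
			    struct fake_stable_node *node)
{
	page->mapping = (void *)((unsigned long)node | MAPPING_FLAGS);
}

/* Mirrors PageKsm(): a KSM page has both tag bits set; an ordinary
 * anonymous page has only MAPPING_ANON; a file page has neither. */
static int page_is_ksm(const struct fake_page *page)
{
	return ((unsigned long)page->mapping & MAPPING_FLAGS) == MAPPING_FLAGS;
}

/* Mirrors page_stable_node(): strip the tag bits to recover the node. */
static struct fake_stable_node *stable_node_of(const struct fake_page *page)
{
	if (!page_is_ksm(page))
		return NULL;
	return (struct fake_stable_node *)
		((unsigned long)page->mapping & ~MAPPING_FLAGS);
}

int main(void)
{
	static struct fake_stable_node node;
	struct fake_page page = { NULL };

	set_stable_node(&page, &node);
	assert(page_is_ksm(&page));
	assert(stable_node_of(&page) == &node);
	printf("KSM tag round-trips: node %p recovered\n",
	       (void *)stable_node_of(&page));
	return 0;
}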
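The ksm_might_need_to_copy() fast path is the other piece worth illustrating: a swapped-in page can be reused directly when it has no anon_vma yet, or when it already belongs to this vma's anon_vma at this linear offset; only a page owned by a different anon_vma (or a different offset in the same one) falls through to the out-of-line ksm_does_need_to_copy(), which makes a private copy for do_swap_page() and leaves remerging to ksmd. Below is a minimal user-space model of that decision, assuming 4K pages; anon_vma_model, vma_model and page_model are hypothetical stand-ins for struct anon_vma, vm_area_struct and struct page, and the malloc'd copy stands in for the kernel's page allocation.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SHIFT 12	/* assume 4K pages for the model */

struct anon_vma_model { int id; };

struct vma_model {			/* stand-in for vm_area_struct */
	struct anon_vma_model *anon_vma;
	unsigned long vm_start;
	unsigned long vm_pgoff;
};

struct page_model {			/* stand-in for struct page */
	struct anon_vma_model *anon_vma;	/* NULL while still KSM */
	unsigned long index;
	char data[1 << PAGE_SHIFT];
};

/* Mirrors linear_page_index(): the page offset this address maps to
 * within the vma. */
static unsigned long linear_index(const struct vma_model *vma,
				  unsigned long address)
{
	return ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
}

/* Mirrors the ksm_might_need_to_copy() condition: reuse the page unless
 * it is already owned by a different anon_vma or a different offset;
 * then hand back a private copy (our ksm_does_need_to_copy() stand-in). */
static struct page_model *might_need_to_copy(struct page_model *page,
		const struct vma_model *vma, unsigned long address)
{
	if (!page->anon_vma ||
	    (page->anon_vma == vma->anon_vma &&
	     page->index == linear_index(vma, address)))
		return page;		/* safe to map in place */

	struct page_model *copy = malloc(sizeof(*copy));
	if (!copy)
		return NULL;		/* caller treats this as OOM */
	memcpy(copy->data, page->data, sizeof(copy->data));
	copy->anon_vma = vma->anon_vma;
	copy->index = linear_index(vma, address);
	return copy;
}

int main(void)
{
	struct anon_vma_model av1 = { 1 }, av2 = { 2 };
	struct vma_model vma = { &av1, 0x100000, 0 };
	static struct page_model page;	/* static: keep 4K data[] off the stack */

	page.anon_vma = &av2;		/* already owned by another anon_vma */
	page.index = 5;

	unsigned long addr = vma.vm_start + (5UL << PAGE_SHIFT);
	struct page_model *res = might_need_to_copy(&page, &vma, addr);

	printf("copy needed: %s\n", res != &page ? "yes" : "no");	/* yes */
	if (res && res != &page)
		free(res);
	return 0;
}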