 fs/proc/page.c      |  5 +++++
 include/linux/ksm.h | 29 +++++++++++++++++++++++++++++
 mm/memory.c         |  3 ++-
 3 files changed, 36 insertions(+), 1 deletion(-)
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 2707c6c7a20f..2281c2cbfe2b 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -2,6 +2,7 @@
 #include <linux/compiler.h>
 #include <linux/fs.h>
 #include <linux/init.h>
+#include <linux/ksm.h>
 #include <linux/mm.h>
 #include <linux/mmzone.h>
 #include <linux/proc_fs.h>
@@ -95,6 +96,8 @@ static const struct file_operations proc_kpagecount_operations = {
 #define KPF_UNEVICTABLE		18
 #define KPF_NOPAGE		20
 
+#define KPF_KSM			21
+
 /* kernel hacking assistances
  * WARNING: subject to change, never rely on them!
  */
@@ -137,6 +140,8 @@ static u64 get_uflags(struct page *page)
 		u |= 1 << KPF_MMAP;
 	if (PageAnon(page))
 		u |= 1 << KPF_ANON;
+	if (PageKsm(page))
+		u |= 1 << KPF_KSM;
 
 	/*
 	 * compound pages: export both head/tail info
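
The hunks above export the new KPF_KSM bit (21) through /proc/kpageflags. The following is a minimal userspace sketch, not part of the patch, showing how that bit could be read back for a given PFN; the command-line handling and error paths are illustrative assumptions, and reading /proc/kpageflags requires root.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define KPF_KSM 21	/* bit number added by this patch */

int main(int argc, char **argv)
{
	uint64_t flags;
	unsigned long pfn;
	int fd;

	if (argc < 2)
		return 1;
	pfn = strtoul(argv[1], NULL, 0);

	/* /proc/kpageflags is an array of u64 flag words, one per PFN. */
	fd = open("/proc/kpageflags", O_RDONLY);
	if (fd < 0)
		return 1;
	if (pread(fd, &flags, sizeof(flags), pfn * sizeof(flags)) != sizeof(flags)) {
		close(fd);
		return 1;
	}
	printf("pfn %lu: %s\n", pfn, (flags >> KPF_KSM) & 1 ? "KSM" : "not KSM");
	close(fd);
	return 0;
}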
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index eb2a448981ee..a485c14ecd5d 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -10,6 +10,7 @@
 #include <linux/bitops.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
+#include <linux/vmstat.h>
 
 #ifdef CONFIG_KSM
 int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
@@ -29,6 +30,27 @@ static inline void ksm_exit(struct mm_struct *mm)
 	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
 		__ksm_exit(mm);
 }
+
+/*
+ * A KSM page is one of those write-protected "shared pages" or "merged pages"
+ * which KSM maps into multiple mms, wherever identical anonymous page content
+ * is found in VM_MERGEABLE vmas.  It's a PageAnon page, with NULL anon_vma.
+ */
+static inline int PageKsm(struct page *page)
+{
+	return ((unsigned long)page->mapping == PAGE_MAPPING_ANON);
+}
+
+/*
+ * But we have to avoid the checking which page_add_anon_rmap() performs.
+ */
+static inline void page_add_ksm_rmap(struct page *page)
+{
+	if (atomic_inc_and_test(&page->_mapcount)) {
+		page->mapping = (void *) PAGE_MAPPING_ANON;
+		__inc_zone_page_state(page, NR_ANON_PAGES);
+	}
+}
 #else  /* !CONFIG_KSM */
 
 static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
@@ -45,6 +67,13 @@ static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
 static inline void ksm_exit(struct mm_struct *mm)
 {
 }
+
+static inline int PageKsm(struct page *page)
+{
+	return 0;
+}
+
+/* No stub required for page_add_ksm_rmap(page) */
 #endif /* !CONFIG_KSM */
 
 #endif
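
PageKsm() above works because an anonymous page's mapping field is its anon_vma pointer with the low PAGE_MAPPING_ANON bit set, while a KSM page keeps only the flag bit (a NULL anon_vma). The sketch below is a userspace illustration of that encoding, not kernel code: the fake_page struct, helper names, and example pointer values are made up for illustration, with PAGE_MAPPING_ANON taken as bit 0 as in include/linux/mm.h.

#include <stdio.h>

#define PAGE_MAPPING_ANON 1UL

struct fake_page { unsigned long mapping; };

/* Mirrors PageAnon(): any mapping with the low bit set is anonymous. */
static int page_anon(struct fake_page *p) { return (p->mapping & PAGE_MAPPING_ANON) != 0; }

/* Mirrors PageKsm(): anonymous, but with no anon_vma behind the flag bit. */
static int page_ksm(struct fake_page *p)  { return p->mapping == PAGE_MAPPING_ANON; }

int main(void)
{
	struct fake_page file_page = { 0x1000 };                      /* address_space pointer */
	struct fake_page anon_page = { 0x2000 | PAGE_MAPPING_ANON };  /* anon_vma pointer | 1 */
	struct fake_page ksm_page  = { PAGE_MAPPING_ANON };           /* NULL anon_vma, flag only */

	printf("file: anon=%d ksm=%d\n", page_anon(&file_page), page_ksm(&file_page));
	printf("anon: anon=%d ksm=%d\n", page_anon(&anon_page), page_ksm(&anon_page));
	printf("ksm:  anon=%d ksm=%d\n", page_anon(&ksm_page),  page_ksm(&ksm_page));
	return 0;
}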
diff --git a/mm/memory.c b/mm/memory.c
index 7a61a11f1867..1a435b81876c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -45,6 +45,7 @@
 #include <linux/swap.h>
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
+#include <linux/ksm.h>
 #include <linux/rmap.h>
 #include <linux/module.h>
 #include <linux/delayacct.h>
@@ -1974,7 +1975,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * Take out anonymous pages first, anonymous shared vmas are
 	 * not dirty accountable.
 	 */
-	if (PageAnon(old_page)) {
+	if (PageAnon(old_page) && !PageKsm(old_page)) {
 		if (!trylock_page(old_page)) {
 			page_cache_get(old_page);
 			pte_unmap_unlock(page_table, ptl);
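
The one-line change to do_wp_page() keeps a write-protected KSM page out of the anonymous-reuse path, so a write fault on a merged page always takes the copy path and the writer gets its own private page. Below is a hedged userspace sketch of that scenario, not part of this patch: it assumes the rest of the KSM series (the MADV_MERGEABLE madvise flag) is applied, CONFIG_KSM is enabled, and ksmd has been started.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MADV_MERGEABLE
#define MADV_MERGEABLE 12	/* from the KSM madvise() interface */
#endif

int main(void)
{
	size_t len = 2 * 4096;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	/* Two identical anonymous pages: candidates for merging by ksmd. */
	memset(p, 'x', len);
	if (madvise(p, len, MADV_MERGEABLE))
		perror("madvise");	/* needs CONFIG_KSM in the running kernel */

	/*
	 * Once ksmd has merged them into one write-protected KSM page, this
	 * store faults into do_wp_page(); with the hunk above, the PageKsm
	 * page is never reused, so the write breaks out a private copy.
	 */
	p[0] = 'y';
	return 0;
}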