Diffstat (limited to 'include/linux/ksm.h')
-rw-r--r--   include/linux/ksm.h   79
1 file changed, 79 insertions, 0 deletions
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
new file mode 100644
index 000000000000..a485c14ecd5d
--- /dev/null
+++ b/include/linux/ksm.h
@@ -0,0 +1,79 @@
#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/vmstat.h>

#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
		return __ksm_enter(mm);
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}

/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, with NULL anon_vma.
 */
static inline int PageKsm(struct page *page)
{
	return ((unsigned long)page->mapping == PAGE_MAPPING_ANON);
}

/*
 * But we have to avoid the checking which page_add_anon_rmap() performs.
 */
static inline void page_add_ksm_rmap(struct page *page)
{
	if (atomic_inc_and_test(&page->_mapcount)) {
		page->mapping = (void *) PAGE_MAPPING_ANON;
		__inc_zone_page_state(page, NR_ANON_PAGES);
	}
}
#else  /* !CONFIG_KSM */

static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

static inline int PageKsm(struct page *page)
{
	return 0;
}

/* No stub required for page_add_ksm_rmap(page) */
#endif  /* !CONFIG_KSM */

#endif
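
Context for the interface above (not part of the patch): the advice argument that ksm_madvise() handles is driven from user space through madvise(2), using the MADV_MERGEABLE/MADV_UNMERGEABLE values introduced alongside KSM. The following is a minimal user-space sketch of opting an anonymous region into merging; it assumes a kernel built with CONFIG_KSM and the ksmd scanner enabled (echo 1 > /sys/kernel/mm/ksm/run).

/*
 * Minimal user-space sketch: ask the kernel to consider a region for
 * merging.  On the kernel side this path ends up in ksm_madvise(),
 * which sets VM_MERGEABLE on the vma and calls __ksm_enter() for the mm.
 */
#define _DEFAULT_SOURCE		/* for MADV_MERGEABLE in <sys/mman.h> */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 64 * 4096;		/* a few dozen pages */
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	memset(buf, 0x5a, len);		/* identical page contents, eligible for merging */

	/* Mark the vma VM_MERGEABLE; ksmd may later merge its pages. */
	if (madvise(buf, len, MADV_MERGEABLE) != 0) {
		perror("madvise(MADV_MERGEABLE)");
		return 1;
	}

	sleep(30);			/* give ksmd time to scan */

	munmap(buf, len);
	return 0;
}

If ksmd does merge anything, progress is visible in /sys/kernel/mm/ksm/pages_sharing; the resulting write-protected pages are exactly the KSM pages that PageKsm() above identifies.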