author     Hugh Dickins <hugh.dickins@tiscali.co.uk>        2009-09-21 20:02:26 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2009-09-22 10:17:33 -0400
commit     a913e182ab9484308e870af37a14d372742d53b0 (patch)
tree       0fca5598aeba4c53999ec46b6b82b46f9a981965 /include/linux/ksm.h
parent     8314c4f24a0a5c9b1f7544e9fa83a1d5367ddaa7 (diff)
ksm: clean up obsolete references
A few cleanups, given the munlock fix: the comment on ksm_test_exit() no longer applies, and it can be made private to ksm.c; there's no more reference to mmu_gather or tlb.h, and mmap.c doesn't need ksm.h.

Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Acked-by: Izik Eidus <ieidus@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
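Note that the helper removed from the header is not deleted from the kernel; per the commit message it simply becomes private to mm/ksm.c. A minimal sketch of the now-private helper, based on the definition removed from include/linux/ksm.h below (the exact placement and surrounding code in ksm.c are not part of this diff):

	/*
	 * Returns true once the last user reference to the mm is gone,
	 * i.e. the address space is being torn down.
	 */
	static inline bool ksm_test_exit(struct mm_struct *mm)
	{
		return atomic_read(&mm->mm_users) == 0;
	}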
Diffstat (limited to 'include/linux/ksm.h')
-rw-r--r--  include/linux/ksm.h   20
1 file changed, 0 insertions(+), 20 deletions(-)
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 0e26de6adb51..a485c14ecd5d 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -12,8 +12,6 @@
 #include <linux/sched.h>
 #include <linux/vmstat.h>
 
-struct mmu_gather;
-
 #ifdef CONFIG_KSM
 int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
 		unsigned long end, int advice, unsigned long *vm_flags);
@@ -27,19 +25,6 @@ static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
 	return 0;
 }
 
-/*
- * For KSM to handle OOM without deadlock when it's breaking COW in a
- * likely victim of the OOM killer, exit_mmap() has to serialize with
- * ksm_exit() after freeing mm's pages but before freeing its page tables.
- * That leaves a window in which KSM might refault pages which have just
- * been finally unmapped: guard against that with ksm_test_exit(), and
- * use it after getting mmap_sem in ksm.c, to check if mm is exiting.
- */
-static inline bool ksm_test_exit(struct mm_struct *mm)
-{
-	return atomic_read(&mm->mm_users) == 0;
-}
-
 static inline void ksm_exit(struct mm_struct *mm)
 {
 	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
@@ -79,11 +64,6 @@ static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
 	return 0;
 }
 
-static inline bool ksm_test_exit(struct mm_struct *mm)
-{
-	return 0;
-}
-
 static inline void ksm_exit(struct mm_struct *mm)
 {
 }