aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/khugepaged.h
diff options
context:
space:
mode:
authorAndrea Arcangeli <aarcange@redhat.com>2011-01-13 18:46:58 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2011-01-13 20:32:43 -0500
commitba76149f47d8c939efa0acc07a191237af900471 (patch)
tree162990f51dd24984f114cba14fc7169a3b54f0f1 /include/linux/khugepaged.h
parent79134171df238171daa4c024a42b77b401ccb00b (diff)
thp: khugepaged
Add khugepaged to relocate fragmented pages into hugepages if new hugepages become available. (this is independent of the defrag logic that will have to make new hugepages available) The fundamental reason why khugepaged is unavoidable, is that some memory can be fragmented and not everything can be relocated. So when a virtual machine quits and releases gigabytes of hugepages, we want to use those freely available hugepages to create huge-pmd in the other virtual machines that may be running on fragmented memory, to maximize the CPU efficiency at all times. The scan is slow, it takes nearly zero cpu time, except when it copies data (in which case it means we definitely want to pay for that cpu time) so it seems a good tradeoff. In addition to the hugepages being released by other process releasing memory, we have the strong suspicion that the performance impact of potentially defragmenting hugepages during or before each page fault could lead to more performance inconsistency than allocating small pages at first and having them collapsed into large pages later... if they prove themselves to be long lived mappings (khugepaged scan is slow so short lived mappings have low probability to run into khugepaged if compared to long lived mappings). Signed-off-by: Andrea Arcangeli <aarcange@redhat.com> Acked-by: Rik van Riel <riel@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/khugepaged.h')
-rw-r--r--include/linux/khugepaged.h66
1 files changed, 66 insertions, 0 deletions
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
new file mode 100644
index 000000000000..552f3184756c
--- /dev/null
+++ b/include/linux/khugepaged.h
@@ -0,0 +1,66 @@
1#ifndef _LINUX_KHUGEPAGED_H
2#define _LINUX_KHUGEPAGED_H
3
4#include <linux/sched.h> /* MMF_VM_HUGEPAGE */
5
6#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* Register @mm for khugepaged scanning; nonzero on failure (caller maps to -ENOMEM). */
extern int __khugepaged_enter(struct mm_struct *mm);
/* Drop @mm from khugepaged's scan list; presumably called on mm teardown — confirm in mm/. */
extern void __khugepaged_exit(struct mm_struct *mm);
/* Re-evaluate khugepaged registration after a VMA merge changes eligibility. */
extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma);
10
/*
 * khugepaged should run when THP is enabled either for all mappings
 * (TRANSPARENT_HUGEPAGE_FLAG) or for madvise(MADV_HUGEPAGE) regions
 * only (TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG).
 */
#define khugepaged_enabled() \
	(transparent_hugepage_flags & \
	 ((1<<TRANSPARENT_HUGEPAGE_FLAG) | \
	  (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
/* THP enabled for every mapping, no madvise required. */
#define khugepaged_always() \
	(transparent_hugepage_flags & \
	 (1<<TRANSPARENT_HUGEPAGE_FLAG))
/* THP only for VMAs that opted in via madvise (VM_HUGEPAGE). */
#define khugepaged_req_madv() \
	(transparent_hugepage_flags & \
	 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
/* Whether khugepaged may defrag/compact when allocating hugepages. */
#define khugepaged_defrag() \
	(transparent_hugepage_flags & \
	 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
24
25static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
26{
27 if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
28 return __khugepaged_enter(mm);
29 return 0;
30}
31
32static inline void khugepaged_exit(struct mm_struct *mm)
33{
34 if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
35 __khugepaged_exit(mm);
36}
37
38static inline int khugepaged_enter(struct vm_area_struct *vma)
39{
40 if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
41 if (khugepaged_always() ||
42 (khugepaged_req_madv() &&
43 vma->vm_flags & VM_HUGEPAGE))
44 if (__khugepaged_enter(vma->vm_mm))
45 return -ENOMEM;
46 return 0;
47}
48#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* THP compiled out: nothing to inherit on fork, always succeed. */
static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}
/* THP compiled out: no khugepaged state to tear down. */
static inline void khugepaged_exit(struct mm_struct *mm)
{
}
/* THP compiled out: registration is a no-op that always succeeds. */
static inline int khugepaged_enter(struct vm_area_struct *vma)
{
	return 0;
}
/* THP compiled out: VMA-merge hook is a no-op that always succeeds. */
static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
{
	return 0;
}
64#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
65
66#endif /* _LINUX_KHUGEPAGED_H */