aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/huge_mm.h
diff options
context:
space:
mode:
authorAndrea Arcangeli <aarcange@redhat.com>2011-01-13 18:47:18 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2011-01-13 20:32:47 -0500
commit60ab3244ec85c44276c585a2a20d3750402e1cf4 (patch)
treee69e866b370243fc58a6fc721e5347a265e8fd4f /include/linux/huge_mm.h
parenta664b2d8555c659127bf8fe049a58449d394a707 (diff)
thp: khugepaged: make khugepaged aware about madvise
MADV_HUGEPAGE and MADV_NOHUGEPAGE were fully effective only if run after mmap and before touching the memory. While this is enough for most usages, it's little effort to make madvise more dynamic at runtime on an existing mapping by making khugepaged aware about madvise. MADV_HUGEPAGE: register in khugepaged immediately without waiting a page fault (that may not ever happen if all pages are already mapped and the "enabled" knob was set to madvise during the initial page faults). MADV_NOHUGEPAGE: skip vmas marked VM_NOHUGEPAGE in khugepaged to stop collapsing pages where not needed. [akpm@linux-foundation.org: tweak comment] Signed-off-by: Andrea Arcangeli <aarcange@redhat.com> Cc: Michael Kerrisk <mtk.manpages@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/huge_mm.h')
-rw-r--r--include/linux/huge_mm.h6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index a8b7e42d19ec..bddfba1d7b85 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -105,7 +105,8 @@ extern void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd);
 #if HPAGE_PMD_ORDER > MAX_ORDER
 #error "hugepages can't be allocated by the buddy allocator"
 #endif
-extern int hugepage_madvise(unsigned long *vm_flags, int advice);
+extern int hugepage_madvise(struct vm_area_struct *vma,
+			    unsigned long *vm_flags, int advice);
 extern void __vma_adjust_trans_huge(struct vm_area_struct *vma,
 				    unsigned long start,
 				    unsigned long end,
@@ -143,7 +144,8 @@ static inline int split_huge_page(struct page *page)
 	do { } while (0)
 #define wait_split_huge_page(__anon_vma, __pmd)	\
 	do { } while (0)
-static inline int hugepage_madvise(unsigned long *vm_flags, int advice)
+static inline int hugepage_madvise(struct vm_area_struct *vma,
+				   unsigned long *vm_flags, int advice)
 {
 	BUG();
 	return 0;