aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/huge_mm.h
diff options
context:
space:
mode:
authorAndrea Arcangeli <aarcange@redhat.com>2011-01-13 18:47:08 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2011-01-13 20:32:45 -0500
commit94fcc585fb85ad7b059c70872489b50044d401f3 (patch)
tree67efce3803149bec77df1f50a06f384deae02ba6 /include/linux/huge_mm.h
parentbc835011afbea3957217ee716093d791fb2fe44f (diff)
thp: avoid breaking huge pmd invariants in case of vma_adjust failures
An huge pmd can only be mapped if the corresponding 2M virtual range is fully contained in the vma. At times the VM calls split_vma twice, if the first split_vma succeeds and the second fail, the first split_vma remains in effect and it's not rolled back. For split_vma or vma_adjust to fail an allocation failure is needed so it's a very unlikely event (the out of memory killer would normally fire before any allocation failure is visible to kernel and userland and if an out of memory condition happens it's unlikely to happen exactly here). Nevertheless it's safer to ensure that no huge pmd can be left around if the vma is adjusted in a way that can't fit hugepages anymore at the new vm_start/vm_end address. Signed-off-by: Andrea Arcangeli <aarcange@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/huge_mm.h')
-rw-r--r--include/linux/huge_mm.h19
1 file changed, 19 insertions, 0 deletions
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index c590b08c6fa6..827595228734 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -104,6 +104,19 @@ extern void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd);
 #error "hugepages can't be allocated by the buddy allocator"
 #endif
 extern int hugepage_madvise(unsigned long *vm_flags);
107extern void __vma_adjust_trans_huge(struct vm_area_struct *vma,
108 unsigned long start,
109 unsigned long end,
110 long adjust_next);
111static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
112 unsigned long start,
113 unsigned long end,
114 long adjust_next)
115{
116 if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
117 return;
118 __vma_adjust_trans_huge(vma, start, end, adjust_next);
119}
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 #define HPAGE_PMD_SHIFT ({ BUG(); 0; })
 #define HPAGE_PMD_MASK ({ BUG(); 0; })
@@ -125,6 +138,12 @@ static inline int hugepage_madvise(unsigned long *vm_flags)
 	BUG();
 	return 0;
 }
/*
 * Stub for !CONFIG_TRANSPARENT_HUGEPAGE builds: with THP compiled
 * out there are no huge pmds to split, so vma adjustment needs no
 * extra work.
 */
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 #endif /* _LINUX_HUGE_MM_H */