author    Michel Lespinasse <walken@google.com>  2012-10-08 19:31:45 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-10-09 03:22:42 -0400
commit    ed8ea8150182f8d715fceb3b175ef0a9ebacd872 (patch)
tree      3af48f3a947df4dc5a0df660988f61d454a88cf2 /mm
parent    86c2ad19956f84f2191e062fcb979367b6365871 (diff)
mm: add CONFIG_DEBUG_VM_RB build option
Add a CONFIG_DEBUG_VM_RB build option for the previously existing
DEBUG_MM_RB code.  Now that Andi Kleen modified it to avoid using
recursive algorithms, we can expose it a bit more.

Also extend this code to validate_mm() after stack expansion, and to check
that the vma's start and last pgoffs have not changed since the nodes were
inserted on the anon vma interval tree (as it is important that the nodes
be reindexed after each such update).

Signed-off-by: Michel Lespinasse <walken@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Daniel Santos <daniel.santos@pobox.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
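For illustration, here is a minimal user-space C sketch of the debug pattern
this patch applies: snapshot an object's index bounds when it is inserted into
a search structure, then warn later if the live bounds have diverged from the
snapshot, which would mean the node was updated without being reindexed.  The
struct and names below are illustrative only; the real code caches
cached_vma_start/cached_vma_last in struct anon_vma_chain and checks them with
WARN_ON_ONCE(), as the diff below shows.

#include <stdio.h>

struct node {
	unsigned long start, last;               /* live index bounds */
	unsigned long cached_start, cached_last; /* snapshot taken at insert */
};

static void node_insert(struct node *n)
{
	/* Snapshot the bounds the tree was indexed under. */
	n->cached_start = n->start;
	n->cached_last = n->last;
	/* ... real code would link n into the interval tree here ... */
}

static void node_verify(struct node *n)
{
	/* Fire if the bounds changed without a remove/reinsert cycle. */
	if (n->cached_start != n->start || n->cached_last != n->last)
		fprintf(stderr, "node %p changed without reindexing!\n",
			(void *)n);
}

int main(void)
{
	struct node n = { .start = 4, .last = 8 };

	node_insert(&n);
	n.last = 16;		/* bounds changed behind the tree's back... */
	node_verify(&n);	/* ...so the check fires */
	return 0;
}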
Diffstat (limited to 'mm')
-rw-r--r--  mm/interval_tree.c | 41
-rw-r--r--  mm/mmap.c          | 19
2 files changed, 49 insertions(+), 11 deletions(-)
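Note that the diffstat is limited to 'mm', so the hunk that introduces the
option itself (in lib/Kconfig.debug) is not shown here.  For orientation only,
a sketch of what such an entry looks like; the help text is illustrative, not
quoted from the patch:

config DEBUG_VM_RB
	bool "Debug VM red-black trees"
	depends on DEBUG_VM
	help
	  Enable extra validation of the VM rb-trees, including the
	  cached pgoff checks added by this patch.

	  If unsure, say N.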
diff --git a/mm/interval_tree.c b/mm/interval_tree.c
index f7c72cd35e1d..4a5822a586e6 100644
--- a/mm/interval_tree.c
+++ b/mm/interval_tree.c
@@ -70,4 +70,43 @@ static inline unsigned long avc_last_pgoff(struct anon_vma_chain *avc)
 }
 
 INTERVAL_TREE_DEFINE(struct anon_vma_chain, rb, unsigned long, rb_subtree_last,
-		     avc_start_pgoff, avc_last_pgoff,, anon_vma_interval_tree)
+		     avc_start_pgoff, avc_last_pgoff,
+		     static inline, __anon_vma_interval_tree)
+
+void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
+				   struct rb_root *root)
+{
+#ifdef CONFIG_DEBUG_VM_RB
+	node->cached_vma_start = avc_start_pgoff(node);
+	node->cached_vma_last = avc_last_pgoff(node);
+#endif
+	__anon_vma_interval_tree_insert(node, root);
+}
+
+void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
+				   struct rb_root *root)
+{
+	__anon_vma_interval_tree_remove(node, root);
+}
+
+struct anon_vma_chain *
+anon_vma_interval_tree_iter_first(struct rb_root *root,
+				  unsigned long first, unsigned long last)
+{
+	return __anon_vma_interval_tree_iter_first(root, first, last);
+}
+
+struct anon_vma_chain *
+anon_vma_interval_tree_iter_next(struct anon_vma_chain *node,
+				 unsigned long first, unsigned long last)
+{
+	return __anon_vma_interval_tree_iter_next(node, first, last);
+}
+
+#ifdef CONFIG_DEBUG_VM_RB
+void anon_vma_interval_tree_verify(struct anon_vma_chain *node)
+{
+	WARN_ON_ONCE(node->cached_vma_start != avc_start_pgoff(node));
+	WARN_ON_ONCE(node->cached_vma_last != avc_last_pgoff(node));
+}
+#endif
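For context, callers walk all anon_vma_chains overlapping a page range with
the iterator pair wrapped above.  A sketch of such a loop, using only the
functions this file defines (it is a fragment: root, first and last come from
the caller, and the anon_vma locking that real users such as the rmap walkers
take is omitted):

	struct anon_vma_chain *avc;

	for (avc = anon_vma_interval_tree_iter_first(root, first, last);
	     avc != NULL;
	     avc = anon_vma_interval_tree_iter_next(avc, first, last)) {
		/* each avc->vma maps a range overlapping [first, last] */
	}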
diff --git a/mm/mmap.c b/mm/mmap.c
index 2e580ed79211..deb422c39e21 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -51,12 +51,6 @@ static void unmap_region(struct mm_struct *mm,
 		struct vm_area_struct *vma, struct vm_area_struct *prev,
 		unsigned long start, unsigned long end);
 
-/*
- * WARNING: the debugging will use recursive algorithms so never enable this
- * unless you know what you are doing.
- */
-#undef DEBUG_MM_RB
-
 /* description of effects of mapping type and prot in current implementation.
  * this is due to the limited x86 page protection hardware.  The expected
  * behavior is in parens:
@@ -303,7 +297,7 @@ out:
 	return retval;
 }
 
-#ifdef DEBUG_MM_RB
+#ifdef CONFIG_DEBUG_VM_RB
 static int browse_rb(struct rb_root *root)
 {
 	int i = 0, j;
@@ -337,9 +331,12 @@ void validate_mm(struct mm_struct *mm)
 {
 	int bug = 0;
 	int i = 0;
-	struct vm_area_struct *tmp = mm->mmap;
-	while (tmp) {
-		tmp = tmp->vm_next;
+	struct vm_area_struct *vma = mm->mmap;
+	while (vma) {
+		struct anon_vma_chain *avc;
+		list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
+			anon_vma_interval_tree_verify(avc);
+		vma = vma->vm_next;
 		i++;
 	}
 	if (i != mm->map_count)
@@ -1790,6 +1787,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 	}
 	vma_unlock_anon_vma(vma);
 	khugepaged_enter_vma_merge(vma);
+	validate_mm(vma->vm_mm);
 	return error;
 }
 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
@@ -1843,6 +1841,7 @@ int expand_downwards(struct vm_area_struct *vma,
 	}
 	vma_unlock_anon_vma(vma);
 	khugepaged_enter_vma_merge(vma);
+	validate_mm(vma->vm_mm);
 	return error;
 }
 