diff options
-rw-r--r--  include/linux/mm.h   |  3
-rw-r--r--  include/linux/rmap.h |  3
-rw-r--r--  lib/Kconfig.debug    |  9
-rw-r--r--  mm/interval_tree.c   | 41
-rw-r--r--  mm/mmap.c            | 19
5 files changed, 64 insertions(+), 11 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0cdab4e0f814..0e6f9c9f2123 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1386,6 +1386,9 @@ struct anon_vma_chain *anon_vma_interval_tree_iter_first(
 	struct rb_root *root, unsigned long start, unsigned long last);
 struct anon_vma_chain *anon_vma_interval_tree_iter_next(
 	struct anon_vma_chain *node, unsigned long start, unsigned long last);
+#ifdef CONFIG_DEBUG_VM_RB
+void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
+#endif
 
 #define anon_vma_interval_tree_foreach(avc, root, start, last) \
 	for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index dce44f7d3ed8..b2cce644ffc7 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -66,6 +66,9 @@ struct anon_vma_chain {
 	struct list_head same_vma;   /* locked by mmap_sem & page_table_lock */
 	struct rb_node rb;			/* locked by anon_vma->mutex */
 	unsigned long rb_subtree_last;
+#ifdef CONFIG_DEBUG_VM_RB
+	unsigned long cached_vma_start, cached_vma_last;
+#endif
 };
 
 #ifdef CONFIG_MMU
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index a6e7e7741523..28e9d6c98941 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -798,6 +798,15 @@ config DEBUG_VM
 
 	  If unsure, say N.
 
+config DEBUG_VM_RB
+	bool "Debug VM red-black trees"
+	depends on DEBUG_VM
+	help
+	  Enable this to turn on more extended checks in the virtual-memory
+	  system that may impact performance.
+
+	  If unsure, say N.
+
 config DEBUG_VIRTUAL
 	bool "Debug VM translations"
 	depends on DEBUG_KERNEL && X86
diff --git a/mm/interval_tree.c b/mm/interval_tree.c
index f7c72cd35e1d..4a5822a586e6 100644
--- a/mm/interval_tree.c
+++ b/mm/interval_tree.c
@@ -70,4 +70,43 @@ static inline unsigned long avc_last_pgoff(struct anon_vma_chain *avc)
 }
 
 INTERVAL_TREE_DEFINE(struct anon_vma_chain, rb, unsigned long, rb_subtree_last,
-		     avc_start_pgoff, avc_last_pgoff,, anon_vma_interval_tree)
+		     avc_start_pgoff, avc_last_pgoff,
+		     static inline, __anon_vma_interval_tree)
+
+void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
+				   struct rb_root *root)
+{
+#ifdef CONFIG_DEBUG_VM_RB
+	node->cached_vma_start = avc_start_pgoff(node);
+	node->cached_vma_last = avc_last_pgoff(node);
+#endif
+	__anon_vma_interval_tree_insert(node, root);
+}
+
+void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
+				   struct rb_root *root)
+{
+	__anon_vma_interval_tree_remove(node, root);
+}
+
+struct anon_vma_chain *
+anon_vma_interval_tree_iter_first(struct rb_root *root,
+				  unsigned long first, unsigned long last)
+{
+	return __anon_vma_interval_tree_iter_first(root, first, last);
+}
+
+struct anon_vma_chain *
+anon_vma_interval_tree_iter_next(struct anon_vma_chain *node,
+				 unsigned long first, unsigned long last)
+{
+	return __anon_vma_interval_tree_iter_next(node, first, last);
+}
+
+#ifdef CONFIG_DEBUG_VM_RB
+void anon_vma_interval_tree_verify(struct anon_vma_chain *node)
+{
+	WARN_ON_ONCE(node->cached_vma_start != avc_start_pgoff(node));
+	WARN_ON_ONCE(node->cached_vma_last != avc_last_pgoff(node));
+}
+#endif
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -51,12 +51,6 @@ static void unmap_region(struct mm_struct *mm,
 		struct vm_area_struct *vma, struct vm_area_struct *prev,
 		unsigned long start, unsigned long end);
 
-/*
- * WARNING: the debugging will use recursive algorithms so never enable this
- * unless you know what you are doing.
- */
-#undef DEBUG_MM_RB
-
 /* description of effects of mapping type and prot in current implementation.
  * this is due to the limited x86 page protection hardware. The expected
  * behavior is in parens:
@@ -303,7 +297,7 @@ out:
 	return retval;
 }
 
-#ifdef DEBUG_MM_RB
+#ifdef CONFIG_DEBUG_VM_RB
 static int browse_rb(struct rb_root *root)
 {
 	int i = 0, j;
@@ -337,9 +331,12 @@ void validate_mm(struct mm_struct *mm)
 {
 	int bug = 0;
 	int i = 0;
-	struct vm_area_struct *tmp = mm->mmap;
-	while (tmp) {
-		tmp = tmp->vm_next;
+	struct vm_area_struct *vma = mm->mmap;
+	while (vma) {
+		struct anon_vma_chain *avc;
+		list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
+			anon_vma_interval_tree_verify(avc);
+		vma = vma->vm_next;
 		i++;
 	}
 	if (i != mm->map_count)
@@ -1790,6 +1787,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 	}
 	vma_unlock_anon_vma(vma);
 	khugepaged_enter_vma_merge(vma);
+	validate_mm(vma->vm_mm);
 	return error;
 }
 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
@@ -1843,6 +1841,7 @@ int expand_downwards(struct vm_area_struct *vma,
 	}
 	vma_unlock_anon_vma(vma);
 	khugepaged_enter_vma_merge(vma);
+	validate_mm(vma->vm_mm);
 	return error;
 }
 