about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
authorSasha Levin <sasha.levin@oracle.com>2014-10-09 18:28:39 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-10-09 22:25:58 -0400
commit96dad67ff244e797c4bc3e4f7f0fdaa0cfdf0a7d (patch)
treeea45352bf168845bf42d0d656b5d8bdacc55d8e8 /mm
parent31c9afa6db122a5c7a7843278aaf77dd08ea6e98 (diff)
mm: use VM_BUG_ON_MM where possible
Dump the contents of the relevant struct mm_struct when we hit the bug condition. Signed-off-by: Sasha Levin <sasha.levin@oracle.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/huge_memory.c2
-rw-r--r--mm/mlock.c2
-rw-r--r--mm/mmap.c7
-rw-r--r--mm/pagewalk.c2
4 files changed, 7 insertions(+), 6 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c13148cc745f..74c78aa8bc2f 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2048,7 +2048,7 @@ int __khugepaged_enter(struct mm_struct *mm)
2048 return -ENOMEM; 2048 return -ENOMEM;
2049 2049
2050 /* __khugepaged_exit() must not run from under us */ 2050 /* __khugepaged_exit() must not run from under us */
2051 VM_BUG_ON(khugepaged_test_exit(mm)); 2051 VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
2052 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) { 2052 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
2053 free_mm_slot(mm_slot); 2053 free_mm_slot(mm_slot);
2054 return 0; 2054 return 0;
diff --git a/mm/mlock.c b/mm/mlock.c
index d5d09d0786ec..03aa8512723b 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -235,7 +235,7 @@ long __mlock_vma_pages_range(struct vm_area_struct *vma,
235 VM_BUG_ON(end & ~PAGE_MASK); 235 VM_BUG_ON(end & ~PAGE_MASK);
236 VM_BUG_ON_VMA(start < vma->vm_start, vma); 236 VM_BUG_ON_VMA(start < vma->vm_start, vma);
237 VM_BUG_ON_VMA(end > vma->vm_end, vma); 237 VM_BUG_ON_VMA(end > vma->vm_end, vma);
238 VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); 238 VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
239 239
240 gup_flags = FOLL_TOUCH | FOLL_MLOCK; 240 gup_flags = FOLL_TOUCH | FOLL_MLOCK;
241 /* 241 /*
diff --git a/mm/mmap.c b/mm/mmap.c
index c9bc285df255..16d19b48e2ad 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -410,8 +410,9 @@ static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore)
410 for (nd = rb_first(root); nd; nd = rb_next(nd)) { 410 for (nd = rb_first(root); nd; nd = rb_next(nd)) {
411 struct vm_area_struct *vma; 411 struct vm_area_struct *vma;
412 vma = rb_entry(nd, struct vm_area_struct, vm_rb); 412 vma = rb_entry(nd, struct vm_area_struct, vm_rb);
413 BUG_ON(vma != ignore && 413 VM_BUG_ON_VMA(vma != ignore &&
414 vma->rb_subtree_gap != vma_compute_subtree_gap(vma)); 414 vma->rb_subtree_gap != vma_compute_subtree_gap(vma),
415 vma);
415 } 416 }
416} 417}
417 418
@@ -448,7 +449,7 @@ static void validate_mm(struct mm_struct *mm)
448 pr_emerg("map_count %d rb %d\n", mm->map_count, i); 449 pr_emerg("map_count %d rb %d\n", mm->map_count, i);
449 bug = 1; 450 bug = 1;
450 } 451 }
451 BUG_ON(bug); 452 VM_BUG_ON_MM(bug, mm);
452} 453}
453#else 454#else
454#define validate_mm_rb(root, ignore) do { } while (0) 455#define validate_mm_rb(root, ignore) do { } while (0)
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 2beeabf502c5..ad83195521f2 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -177,7 +177,7 @@ int walk_page_range(unsigned long addr, unsigned long end,
177 if (!walk->mm) 177 if (!walk->mm)
178 return -EINVAL; 178 return -EINVAL;
179 179
180 VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem)); 180 VM_BUG_ON_MM(!rwsem_is_locked(&walk->mm->mmap_sem), walk->mm);
181 181
182 pgd = pgd_offset(walk->mm, addr); 182 pgd = pgd_offset(walk->mm, addr);
183 do { 183 do {