commit:    81d1b09c6be66afac7d41ee52279d9bccbce56d8
author:    Sasha Levin <sasha.levin@oracle.com>          2014-10-09 18:28:10 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org> 2014-10-09 22:25:57 -0400
tree:      22bc3aa61748eb31bff7a101a3028f45231561eb
parent:    fa3759ccd5651c4235f572302d58c8ec9ddf1c4b
mm: convert a few VM_BUG_ON callers to VM_BUG_ON_VMA
Trivially convert a few VM_BUG_ON calls to VM_BUG_ON_VMA to extract
more information when they trigger.
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Hugh Dickins <hughd@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Michel Lespinasse <walken@google.com>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 include/linux/huge_mm.h |  2 +-
 include/linux/rmap.h    |  2 +-
 mm/huge_memory.c        |  6 +++---
 mm/hugetlb.c            | 14 +++++++-------
 mm/interval_tree.c      |  2 +-
 mm/mlock.c              |  4 ++--
 mm/mmap.c               |  6 +++---
 mm/mremap.c             |  3 ++-
 mm/rmap.c               |  8 ++++----
 9 files changed, 24 insertions(+), 23 deletions(-)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 63579cb8d3dc..ad9051bab267 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h | |||
@@ -132,7 +132,7 @@ extern int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, | |||
132 | static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, | 132 | static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, |
133 | spinlock_t **ptl) | 133 | spinlock_t **ptl) |
134 | { | 134 | { |
135 | VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem)); | 135 | VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma); |
136 | if (pmd_trans_huge(*pmd)) | 136 | if (pmd_trans_huge(*pmd)) |
137 | return __pmd_trans_huge_lock(pmd, vma, ptl); | 137 | return __pmd_trans_huge_lock(pmd, vma, ptl); |
138 | else | 138 | else |
diff --git a/include/linux/rmap.h b/include/linux/rmap.h index be574506e6a9..c0c2bce6b0b7 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h | |||
@@ -150,7 +150,7 @@ int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *); | |||
150 | static inline void anon_vma_merge(struct vm_area_struct *vma, | 150 | static inline void anon_vma_merge(struct vm_area_struct *vma, |
151 | struct vm_area_struct *next) | 151 | struct vm_area_struct *next) |
152 | { | 152 | { |
153 | VM_BUG_ON(vma->anon_vma != next->anon_vma); | 153 | VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma); |
154 | unlink_anon_vmas(next); | 154 | unlink_anon_vmas(next); |
155 | } | 155 | } |
156 | 156 | ||
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 55ab569c31b4..c13148cc745f 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
@@ -1096,7 +1096,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
1096 | unsigned long mmun_end; /* For mmu_notifiers */ | 1096 | unsigned long mmun_end; /* For mmu_notifiers */ |
1097 | 1097 | ||
1098 | ptl = pmd_lockptr(mm, pmd); | 1098 | ptl = pmd_lockptr(mm, pmd); |
1099 | VM_BUG_ON(!vma->anon_vma); | 1099 | VM_BUG_ON_VMA(!vma->anon_vma, vma); |
1100 | haddr = address & HPAGE_PMD_MASK; | 1100 | haddr = address & HPAGE_PMD_MASK; |
1101 | if (is_huge_zero_pmd(orig_pmd)) | 1101 | if (is_huge_zero_pmd(orig_pmd)) |
1102 | goto alloc; | 1102 | goto alloc; |
@@ -2083,7 +2083,7 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma) | |||
2083 | if (vma->vm_ops) | 2083 | if (vma->vm_ops) |
2084 | /* khugepaged not yet working on file or special mappings */ | 2084 | /* khugepaged not yet working on file or special mappings */ |
2085 | return 0; | 2085 | return 0; |
2086 | VM_BUG_ON(vma->vm_flags & VM_NO_THP); | 2086 | VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma); |
2087 | hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; | 2087 | hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; |
2088 | hend = vma->vm_end & HPAGE_PMD_MASK; | 2088 | hend = vma->vm_end & HPAGE_PMD_MASK; |
2089 | if (hstart < hend) | 2089 | if (hstart < hend) |
@@ -2406,7 +2406,7 @@ static bool hugepage_vma_check(struct vm_area_struct *vma) | |||
2406 | return false; | 2406 | return false; |
2407 | if (is_vma_temporary_stack(vma)) | 2407 | if (is_vma_temporary_stack(vma)) |
2408 | return false; | 2408 | return false; |
2409 | VM_BUG_ON(vma->vm_flags & VM_NO_THP); | 2409 | VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma); |
2410 | return true; | 2410 | return true; |
2411 | } | 2411 | } |
2412 | 2412 | ||
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index eeceeeb09019..9fd722769927 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -434,7 +434,7 @@ static inline struct resv_map *inode_resv_map(struct inode *inode) | |||
434 | 434 | ||
435 | static struct resv_map *vma_resv_map(struct vm_area_struct *vma) | 435 | static struct resv_map *vma_resv_map(struct vm_area_struct *vma) |
436 | { | 436 | { |
437 | VM_BUG_ON(!is_vm_hugetlb_page(vma)); | 437 | VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); |
438 | if (vma->vm_flags & VM_MAYSHARE) { | 438 | if (vma->vm_flags & VM_MAYSHARE) { |
439 | struct address_space *mapping = vma->vm_file->f_mapping; | 439 | struct address_space *mapping = vma->vm_file->f_mapping; |
440 | struct inode *inode = mapping->host; | 440 | struct inode *inode = mapping->host; |
@@ -449,8 +449,8 @@ static struct resv_map *vma_resv_map(struct vm_area_struct *vma) | |||
449 | 449 | ||
450 | static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map) | 450 | static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map) |
451 | { | 451 | { |
452 | VM_BUG_ON(!is_vm_hugetlb_page(vma)); | 452 | VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); |
453 | VM_BUG_ON(vma->vm_flags & VM_MAYSHARE); | 453 | VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); |
454 | 454 | ||
455 | set_vma_private_data(vma, (get_vma_private_data(vma) & | 455 | set_vma_private_data(vma, (get_vma_private_data(vma) & |
456 | HPAGE_RESV_MASK) | (unsigned long)map); | 456 | HPAGE_RESV_MASK) | (unsigned long)map); |
@@ -458,15 +458,15 @@ static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map) | |||
458 | 458 | ||
459 | static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags) | 459 | static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags) |
460 | { | 460 | { |
461 | VM_BUG_ON(!is_vm_hugetlb_page(vma)); | 461 | VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); |
462 | VM_BUG_ON(vma->vm_flags & VM_MAYSHARE); | 462 | VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); |
463 | 463 | ||
464 | set_vma_private_data(vma, get_vma_private_data(vma) | flags); | 464 | set_vma_private_data(vma, get_vma_private_data(vma) | flags); |
465 | } | 465 | } |
466 | 466 | ||
467 | static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag) | 467 | static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag) |
468 | { | 468 | { |
469 | VM_BUG_ON(!is_vm_hugetlb_page(vma)); | 469 | VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); |
470 | 470 | ||
471 | return (get_vma_private_data(vma) & flag) != 0; | 471 | return (get_vma_private_data(vma) & flag) != 0; |
472 | } | 472 | } |
@@ -474,7 +474,7 @@ static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag) | |||
474 | /* Reset counters to 0 and clear all HPAGE_RESV_* flags */ | 474 | /* Reset counters to 0 and clear all HPAGE_RESV_* flags */ |
475 | void reset_vma_resv_huge_pages(struct vm_area_struct *vma) | 475 | void reset_vma_resv_huge_pages(struct vm_area_struct *vma) |
476 | { | 476 | { |
477 | VM_BUG_ON(!is_vm_hugetlb_page(vma)); | 477 | VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); |
478 | if (!(vma->vm_flags & VM_MAYSHARE)) | 478 | if (!(vma->vm_flags & VM_MAYSHARE)) |
479 | vma->vm_private_data = (void *)0; | 479 | vma->vm_private_data = (void *)0; |
480 | } | 480 | } |
diff --git a/mm/interval_tree.c b/mm/interval_tree.c index 4a5822a586e6..8da581fa9060 100644 --- a/mm/interval_tree.c +++ b/mm/interval_tree.c | |||
@@ -34,7 +34,7 @@ void vma_interval_tree_insert_after(struct vm_area_struct *node, | |||
34 | struct vm_area_struct *parent; | 34 | struct vm_area_struct *parent; |
35 | unsigned long last = vma_last_pgoff(node); | 35 | unsigned long last = vma_last_pgoff(node); |
36 | 36 | ||
37 | VM_BUG_ON(vma_start_pgoff(node) != vma_start_pgoff(prev)); | 37 | VM_BUG_ON_VMA(vma_start_pgoff(node) != vma_start_pgoff(prev), node); |
38 | 38 | ||
39 | if (!prev->shared.linear.rb.rb_right) { | 39 | if (!prev->shared.linear.rb.rb_right) { |
40 | parent = prev; | 40 | parent = prev; |
diff --git a/mm/mlock.c b/mm/mlock.c index ce84cb0b83ef..d5d09d0786ec 100644 --- a/mm/mlock.c +++ b/mm/mlock.c | |||
@@ -233,8 +233,8 @@ long __mlock_vma_pages_range(struct vm_area_struct *vma, | |||
233 | 233 | ||
234 | VM_BUG_ON(start & ~PAGE_MASK); | 234 | VM_BUG_ON(start & ~PAGE_MASK); |
235 | VM_BUG_ON(end & ~PAGE_MASK); | 235 | VM_BUG_ON(end & ~PAGE_MASK); |
236 | VM_BUG_ON(start < vma->vm_start); | 236 | VM_BUG_ON_VMA(start < vma->vm_start, vma); |
237 | VM_BUG_ON(end > vma->vm_end); | 237 | VM_BUG_ON_VMA(end > vma->vm_end, vma); |
238 | VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); | 238 | VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); |
239 | 239 | ||
240 | gup_flags = FOLL_TOUCH | FOLL_MLOCK; | 240 | gup_flags = FOLL_TOUCH | FOLL_MLOCK; |
@@ -786,8 +786,8 @@ again: remove_next = 1 + (end > next->vm_end); | |||
786 | if (!anon_vma && adjust_next) | 786 | if (!anon_vma && adjust_next) |
787 | anon_vma = next->anon_vma; | 787 | anon_vma = next->anon_vma; |
788 | if (anon_vma) { | 788 | if (anon_vma) { |
789 | VM_BUG_ON(adjust_next && next->anon_vma && | 789 | VM_BUG_ON_VMA(adjust_next && next->anon_vma && |
790 | anon_vma != next->anon_vma); | 790 | anon_vma != next->anon_vma, next); |
791 | anon_vma_lock_write(anon_vma); | 791 | anon_vma_lock_write(anon_vma); |
792 | anon_vma_interval_tree_pre_update_vma(vma); | 792 | anon_vma_interval_tree_pre_update_vma(vma); |
793 | if (adjust_next) | 793 | if (adjust_next) |
@@ -2848,7 +2848,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, | |||
2848 | * safe. It is only safe to keep the vm_pgoff | 2848 | * safe. It is only safe to keep the vm_pgoff |
2849 | * linear if there are no pages mapped yet. | 2849 | * linear if there are no pages mapped yet. |
2850 | */ | 2850 | */ |
2851 | VM_BUG_ON(faulted_in_anon_vma); | 2851 | VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma); |
2852 | *vmap = vma = new_vma; | 2852 | *vmap = vma = new_vma; |
2853 | } | 2853 | } |
2854 | *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff); | 2854 | *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff); |
diff --git a/mm/mremap.c b/mm/mremap.c index 05f1180e9f21..89e45d8a983a 100644 --- a/mm/mremap.c +++ b/mm/mremap.c | |||
@@ -195,7 +195,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma, | |||
195 | if (pmd_trans_huge(*old_pmd)) { | 195 | if (pmd_trans_huge(*old_pmd)) { |
196 | int err = 0; | 196 | int err = 0; |
197 | if (extent == HPAGE_PMD_SIZE) { | 197 | if (extent == HPAGE_PMD_SIZE) { |
198 | VM_BUG_ON(vma->vm_file || !vma->anon_vma); | 198 | VM_BUG_ON_VMA(vma->vm_file || !vma->anon_vma, |
199 | vma); | ||
199 | /* See comment in move_ptes() */ | 200 | /* See comment in move_ptes() */ |
200 | if (need_rmap_locks) | 201 | if (need_rmap_locks) |
201 | anon_vma_lock_write(vma->anon_vma); | 202 | anon_vma_lock_write(vma->anon_vma); |
@@ -527,7 +527,7 @@ vma_address(struct page *page, struct vm_area_struct *vma) | |||
527 | unsigned long address = __vma_address(page, vma); | 527 | unsigned long address = __vma_address(page, vma); |
528 | 528 | ||
529 | /* page should be within @vma mapping range */ | 529 | /* page should be within @vma mapping range */ |
530 | VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); | 530 | VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); |
531 | 531 | ||
532 | return address; | 532 | return address; |
533 | } | 533 | } |
@@ -897,7 +897,7 @@ void page_move_anon_rmap(struct page *page, | |||
897 | struct anon_vma *anon_vma = vma->anon_vma; | 897 | struct anon_vma *anon_vma = vma->anon_vma; |
898 | 898 | ||
899 | VM_BUG_ON_PAGE(!PageLocked(page), page); | 899 | VM_BUG_ON_PAGE(!PageLocked(page), page); |
900 | VM_BUG_ON(!anon_vma); | 900 | VM_BUG_ON_VMA(!anon_vma, vma); |
901 | VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page); | 901 | VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page); |
902 | 902 | ||
903 | anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; | 903 | anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; |
@@ -1024,7 +1024,7 @@ void do_page_add_anon_rmap(struct page *page, | |||
1024 | void page_add_new_anon_rmap(struct page *page, | 1024 | void page_add_new_anon_rmap(struct page *page, |
1025 | struct vm_area_struct *vma, unsigned long address) | 1025 | struct vm_area_struct *vma, unsigned long address) |
1026 | { | 1026 | { |
1027 | VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); | 1027 | VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); |
1028 | SetPageSwapBacked(page); | 1028 | SetPageSwapBacked(page); |
1029 | atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */ | 1029 | atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */ |
1030 | if (PageTransHuge(page)) | 1030 | if (PageTransHuge(page)) |
@@ -1670,7 +1670,7 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc) | |||
1670 | * structure at mapping cannot be freed and reused yet, | 1670 | * structure at mapping cannot be freed and reused yet, |
1671 | * so we can safely take mapping->i_mmap_mutex. | 1671 | * so we can safely take mapping->i_mmap_mutex. |
1672 | */ | 1672 | */ |
1673 | VM_BUG_ON(!PageLocked(page)); | 1673 | VM_BUG_ON_PAGE(!PageLocked(page), page); |
1674 | 1674 | ||
1675 | if (!mapping) | 1675 | if (!mapping) |
1676 | return ret; | 1676 | return ret; |