| author | Sasha Levin <sasha.levin@oracle.com> | 2014-10-09 18:28:10 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-10-09 22:25:57 -0400 |
| commit | 81d1b09c6be66afac7d41ee52279d9bccbce56d8 (patch) | |
| tree | 22bc3aa61748eb31bff7a101a3028f45231561eb /mm/rmap.c | |
| parent | fa3759ccd5651c4235f572302d58c8ec9ddf1c4b (diff) | |
mm: convert a few VM_BUG_ON callers to VM_BUG_ON_VMA
Trivially convert a few VM_BUG_ON calls to VM_BUG_ON_VMA to extract
more information when they trigger.
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Hugh Dickins <hughd@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Michel Lespinasse <walken@google.com>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
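For context, VM_BUG_ON_VMA() is the CONFIG_DEBUG_VM assertion flavour introduced alongside a dump_vma() helper earlier in the same patch series (the parent commit listed above). The sketch below shows roughly what the two flavours expand to when CONFIG_DEBUG_VM=y; it is simplified from include/linux/mmdebug.h, not the verbatim definitions. The point of the conversion is that the _VMA variant prints the offending vm_area_struct before hitting BUG(), so the oops log carries the VMA state (vm_start, vm_end, vm_flags, owning mm) needed to debug the failure.

```c
/*
 * Sketch only, simplified from include/linux/mmdebug.h with
 * CONFIG_DEBUG_VM=y -- not the verbatim kernel definitions.
 */

/* Plain VM_BUG_ON(): no information about the object being checked. */
#define VM_BUG_ON(cond)                 BUG_ON(cond)

/*
 * VM_BUG_ON_VMA(): dump the vm_area_struct first, so the oops log
 * records the state of the VMA that failed the check, then BUG()
 * as before.
 */
#define VM_BUG_ON_VMA(cond, vma)                                \
        do {                                                    \
                if (unlikely(cond)) {                           \
                        dump_vma(vma);                          \
                        BUG();                                  \
                }                                               \
        } while (0)
```

With CONFIG_DEBUG_VM disabled, both variants compile away to no-ops, so the conversion adds no cost to production builds.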
Diffstat (limited to 'mm/rmap.c')
-rw-r--r-- | mm/rmap.c | 8 |
1 file changed, 4 insertions(+), 4 deletions(-)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -527,7 +527,7 @@ vma_address(struct page *page, struct vm_area_struct *vma)
         unsigned long address = __vma_address(page, vma);
 
         /* page should be within @vma mapping range */
-        VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+        VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
 
         return address;
 }
@@ -897,7 +897,7 @@ void page_move_anon_rmap(struct page *page,
         struct anon_vma *anon_vma = vma->anon_vma;
 
         VM_BUG_ON_PAGE(!PageLocked(page), page);
-        VM_BUG_ON(!anon_vma);
+        VM_BUG_ON_VMA(!anon_vma, vma);
         VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page);
 
         anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
@@ -1024,7 +1024,7 @@ void do_page_add_anon_rmap(struct page *page,
 void page_add_new_anon_rmap(struct page *page,
         struct vm_area_struct *vma, unsigned long address)
 {
-        VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+        VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
         SetPageSwapBacked(page);
         atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
         if (PageTransHuge(page))
@@ -1670,7 +1670,7 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
          * structure at mapping cannot be freed and reused yet,
          * so we can safely take mapping->i_mmap_mutex.
          */
-        VM_BUG_ON(!PageLocked(page));
+        VM_BUG_ON_PAGE(!PageLocked(page), page);
 
         if (!mapping)
                 return ret;
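Note that the last hunk converts to VM_BUG_ON_PAGE() rather than VM_BUG_ON_VMA(): the condition there is about page state (PageLocked), so dumping the struct page is the more useful diagnostic. Again as a simplified sketch under CONFIG_DEBUG_VM=y, not the exact mmdebug.h source:

```c
/*
 * Sketch only, simplified from include/linux/mmdebug.h -- the real
 * macro stringifies the failed condition into the dump_page() reason.
 */
#define VM_BUG_ON_PAGE(cond, page)                              \
        do {                                                    \
                if (unlikely(cond)) {                           \
                        dump_page(page, "VM_BUG_ON_PAGE");      \
                        BUG();                                  \
                }                                               \
        } while (0)
```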