Diffstat (limited to 'mm')
-rw-r--r--  mm/highmem.c         15
-rw-r--r--  mm/mmap.c             9
-rw-r--r--  mm/page-writeback.c   2
-rw-r--r--  mm/slab.c            18
-rw-r--r--  mm/swap_state.c       3
-rw-r--r--  mm/vmalloc.c          3
6 files changed, 17 insertions, 33 deletions
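Every hunk below applies the same mechanical conversion: an open-coded `if (cond) BUG();` pair is collapsed into a single `BUG_ON(cond);` (plus one kerneldoc parameter-name fix in mm/page-writeback.c). The snippet below is a minimal userspace sketch of that pattern, not kernel code: the macro bodies only mirror the spirit of the kernel's generic fallback definitions, and the helper functions `before()`/`after()` are hypothetical examples invented for illustration.

```c
/*
 * Userspace sketch of the "if (cond) BUG();" -> "BUG_ON(cond);" conversion.
 * The macros approximate the kernel's generic fallbacks; real kernels use
 * architecture-specific trap instructions instead of abort().
 */
#include <stdio.h>
#include <stdlib.h>

#define unlikely(x)	__builtin_expect(!!(x), 0)
#define BUG()		do { \
				fprintf(stderr, "BUG at %s:%d\n", __FILE__, __LINE__); \
				abort(); \
			} while (0)
#define BUG_ON(cond)	do { if (unlikely(cond)) BUG(); } while (0)

/* Old style: every sanity check costs two lines. */
static void before(const void *p)
{
	if (!p)
		BUG();
}

/* New style: the same check as one self-documenting assertion. */
static void after(const void *p)
{
	BUG_ON(!p);
}

int main(void)
{
	int x = 42;

	before(&x);
	after(&x);
	puts("both checks passed");
	return 0;
}
```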
diff --git a/mm/highmem.c b/mm/highmem.c
index 55885f64af40..9b274fdf9d08 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -74,8 +74,7 @@ static void flush_all_zero_pkmaps(void)
 		pkmap_count[i] = 0;
 
 		/* sanity check */
-		if (pte_none(pkmap_page_table[i]))
-			BUG();
+		BUG_ON(pte_none(pkmap_page_table[i]));
 
 		/*
 		 * Don't need an atomic fetch-and-clear op here;
@@ -158,8 +157,7 @@ void fastcall *kmap_high(struct page *page)
 	if (!vaddr)
 		vaddr = map_new_virtual(page);
 	pkmap_count[PKMAP_NR(vaddr)]++;
-	if (pkmap_count[PKMAP_NR(vaddr)] < 2)
-		BUG();
+	BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
 	spin_unlock(&kmap_lock);
 	return (void*) vaddr;
 }
@@ -174,8 +172,7 @@ void fastcall kunmap_high(struct page *page)
 
 	spin_lock(&kmap_lock);
 	vaddr = (unsigned long)page_address(page);
-	if (!vaddr)
-		BUG();
+	BUG_ON(!vaddr);
 	nr = PKMAP_NR(vaddr);
 
 	/*
@@ -220,8 +217,7 @@ static __init int init_emergency_pool(void)
 		return 0;
 
 	page_pool = mempool_create_page_pool(POOL_SIZE, 0);
-	if (!page_pool)
-		BUG();
+	BUG_ON(!page_pool);
 	printk("highmem bounce pool size: %d pages\n", POOL_SIZE);
 
 	return 0;
@@ -264,8 +260,7 @@ int init_emergency_isa_pool(void)
 
 	isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
 				       mempool_free_pages, (void *) 0);
-	if (!isa_page_pool)
-		BUG();
+	BUG_ON(!isa_page_pool);
 
 	printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
 	return 0;
diff --git a/mm/mmap.c b/mm/mmap.c
index 4f5b5709136a..e780d19aa214 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -294,8 +294,7 @@ void validate_mm(struct mm_struct *mm)
 	i = browse_rb(&mm->mm_rb);
 	if (i != mm->map_count)
 		printk("map_count %d rb %d\n", mm->map_count, i), bug = 1;
-	if (bug)
-		BUG();
+	BUG_ON(bug);
 }
 #else
 #define validate_mm(mm) do { } while (0)
@@ -432,8 +431,7 @@ __insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
 	struct rb_node ** rb_link, * rb_parent;
 
 	__vma = find_vma_prepare(mm, vma->vm_start,&prev, &rb_link, &rb_parent);
-	if (__vma && __vma->vm_start < vma->vm_end)
-		BUG();
+	BUG_ON(__vma && __vma->vm_start < vma->vm_end);
 	__vma_link(mm, vma, prev, rb_link, rb_parent);
 	mm->map_count++;
 }
@@ -813,8 +811,7 @@ try_prev:
 	 * (e.g. stash info in next's anon_vma_node when assigning
 	 * an anon_vma, or when trying vma_merge). Another time.
 	 */
-	if (find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma)
-		BUG();
+	BUG_ON(find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma);
 	if (!near)
 		goto none;
 
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 893d7677579e..6dcce3a4bbdc 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -258,7 +258,7 @@ static void balance_dirty_pages(struct address_space *mapping)
 /**
  * balance_dirty_pages_ratelimited_nr - balance dirty memory state
  * @mapping: address_space which was dirtied
- * @nr_pages: number of pages which the caller has just dirtied
+ * @nr_pages_dirtied: number of pages which the caller has just dirtied
  *
  * Processes which are dirtying memory should call in here once for each page
  * which was newly dirtied. The function will periodically check the system's
diff --git a/mm/slab.c b/mm/slab.c
index 4cbf8bb13557..f055c1420216 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1297,8 +1297,7 @@ void __init kmem_cache_init(void)
 		if (cache_cache.num)
 			break;
 	}
-	if (!cache_cache.num)
-		BUG();
+	BUG_ON(!cache_cache.num);
 	cache_cache.gfporder = order;
 	cache_cache.colour = left_over / cache_cache.colour_off;
 	cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
@@ -1974,8 +1973,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	 * Always checks flags, a caller might be expecting debug support which
 	 * isn't available.
 	 */
-	if (flags & ~CREATE_MASK)
-		BUG();
+	BUG_ON(flags & ~CREATE_MASK);
 
 	/*
 	 * Check that size is in terms of words. This is needed to avoid
@@ -2206,8 +2204,7 @@ static int __node_shrink(struct kmem_cache *cachep, int node)
 
 		slabp = list_entry(l3->slabs_free.prev, struct slab, list);
 #if DEBUG
-		if (slabp->inuse)
-			BUG();
+		BUG_ON(slabp->inuse);
 #endif
 		list_del(&slabp->list);
 
@@ -2248,8 +2245,7 @@ static int __cache_shrink(struct kmem_cache *cachep)
  */
 int kmem_cache_shrink(struct kmem_cache *cachep)
 {
-	if (!cachep || in_interrupt())
-		BUG();
+	BUG_ON(!cachep || in_interrupt());
 
 	return __cache_shrink(cachep);
 }
@@ -2277,8 +2273,7 @@ int kmem_cache_destroy(struct kmem_cache *cachep)
 	int i;
 	struct kmem_list3 *l3;
 
-	if (!cachep || in_interrupt())
-		BUG();
+	BUG_ON(!cachep || in_interrupt());
 
 	/* Don't let CPUs to come and go */
 	lock_cpu_hotplug();
@@ -2477,8 +2472,7 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 	 * Be lazy and only check for valid flags here, keeping it out of the
 	 * critical path in kmem_cache_alloc().
 	 */
-	if (flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW))
-		BUG();
+	BUG_ON(flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW));
 	if (flags & SLAB_NO_GROW)
 		return 0;
 
diff --git a/mm/swap_state.c b/mm/swap_state.c
index d7af296833fc..e0e1583f32c2 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -148,8 +148,7 @@ int add_to_swap(struct page * page, gfp_t gfp_mask)
 	swp_entry_t entry;
 	int err;
 
-	if (!PageLocked(page))
-		BUG();
+	BUG_ON(!PageLocked(page));
 
 	for (;;) {
 		entry = get_swap_page();
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 729eb3eec75f..c0504f1e34eb 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -321,8 +321,7 @@ void __vunmap(void *addr, int deallocate_pages)
 	int i;
 
 	for (i = 0; i < area->nr_pages; i++) {
-		if (unlikely(!area->pages[i]))
-			BUG();
+		BUG_ON(!area->pages[i]);
 		__free_page(area->pages[i]);
 	}
 