author		Eric Sesterhenn <snakebyte@gmx.de>	2006-04-02 07:49:25 -0400
committer	Adrian Bunk <bunk@stusta.de>	2006-04-02 07:49:25 -0400
commit		40094fa65238291d51839326320aba997092ab1f (patch)
tree		37cb650caea9290dbecc27ab5732cd95f5e460d0 /mm/slab.c
parent		75babcacede876608f14ef1a20e795ce17ae637f (diff)
BUG_ON() Conversion in mm/slab.c
This changes if() BUG(); constructs to BUG_ON(), which is cleaner, contains unlikely() and can be better optimized away.

Signed-off-by: Eric Sesterhenn <snakebyte@gmx.de>
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	18
1 file changed, 6 insertions, 12 deletions
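The conversion shown in the diff below relies on the generic BUG_ON() wrapper (roughly what include/asm-generic/bug.h provides; architectures may override it), which folds the check and the unlikely() hint into one statement. A minimal before/after sketch, using one of the checks touched by this patch:

	/* before: open-coded test, no branch-prediction hint */
	if (!cachep || in_interrupt())
		BUG();

	/* after: one statement; unlikely() marks the failure path as cold,
	 * so the compiler can keep it off the hot path */
	BUG_ON(!cachep || in_interrupt());

	/* generic fallback definition, roughly: */
	#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)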
diff --git a/mm/slab.c b/mm/slab.c
index 4cbf8bb13557..f055c1420216 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1297,8 +1297,7 @@ void __init kmem_cache_init(void)
 		if (cache_cache.num)
 			break;
 	}
-	if (!cache_cache.num)
-		BUG();
+	BUG_ON(!cache_cache.num);
 	cache_cache.gfporder = order;
 	cache_cache.colour = left_over / cache_cache.colour_off;
 	cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
@@ -1974,8 +1973,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	 * Always checks flags, a caller might be expecting debug support which
 	 * isn't available.
 	 */
-	if (flags & ~CREATE_MASK)
-		BUG();
+	BUG_ON(flags & ~CREATE_MASK);
 
 	/*
 	 * Check that size is in terms of words. This is needed to avoid
@@ -2206,8 +2204,7 @@ static int __node_shrink(struct kmem_cache *cachep, int node)
 
 		slabp = list_entry(l3->slabs_free.prev, struct slab, list);
 #if DEBUG
-		if (slabp->inuse)
-			BUG();
+		BUG_ON(slabp->inuse);
 #endif
 		list_del(&slabp->list);
 
@@ -2248,8 +2245,7 @@ static int __cache_shrink(struct kmem_cache *cachep)
  */
 int kmem_cache_shrink(struct kmem_cache *cachep)
 {
-	if (!cachep || in_interrupt())
-		BUG();
+	BUG_ON(!cachep || in_interrupt());
 
 	return __cache_shrink(cachep);
 }
@@ -2277,8 +2273,7 @@ int kmem_cache_destroy(struct kmem_cache *cachep)
 	int i;
 	struct kmem_list3 *l3;
 
-	if (!cachep || in_interrupt())
-		BUG();
+	BUG_ON(!cachep || in_interrupt());
 
 	/* Don't let CPUs to come and go */
 	lock_cpu_hotplug();
@@ -2477,8 +2472,7 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 	 * Be lazy and only check for valid flags here, keeping it out of the
 	 * critical path in kmem_cache_alloc().
 	 */
-	if (flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW))
-		BUG();
+	BUG_ON(flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW));
 	if (flags & SLAB_NO_GROW)
 		return 0;
 