aboutsummaryrefslogtreecommitdiffstats
path: root/mm/slab.c
diff options
context:
space:
mode:
authorChristoph Lameter <clameter@sgi.com>2007-07-17 07:03:22 -0400
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-07-17 13:23:01 -0400
commit6cb8f91320d3e720351c21741da795fed580b21b (patch)
treec9f73c8b82cd0f6c534939b8b9f36e8615b0ab2d /mm/slab.c
parentef2ad80c7d255ed0449eda947c2d700635b7e0f5 (diff)
Slab allocators: consistent ZERO_SIZE_PTR support and NULL result semantics
Define ZERO_OR_NULL_PTR macro to be able to remove the checks from the allocators. Move ZERO_SIZE_PTR related stuff into slab.h.

Make ZERO_SIZE_PTR work for all slab allocators and get rid of the WARN_ON_ONCE(size == 0) that is still remaining in SLAB.

Make slub return NULL like the other allocators if a too large memory segment is requested via __kmalloc.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slab.c')
-rw-r--r--mm/slab.c13
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 4bd8a53091b7..d2cd304fd8af 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -775,6 +775,9 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
 	 */
 	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
 #endif
+	if (!size)
+		return ZERO_SIZE_PTR;
+
 	while (size > csizep->cs_size)
 		csizep++;
 
@@ -2351,7 +2354,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		 * this should not happen at all.
 		 * But leave a BUG_ON for some lucky dude.
 		 */
-		BUG_ON(!cachep->slabp_cache);
+		BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
 	}
 	cachep->ctor = ctor;
 	cachep->name = name;
@@ -3653,8 +3656,8 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 	struct kmem_cache *cachep;
 
 	cachep = kmem_find_general_cachep(size, flags);
-	if (unlikely(cachep == NULL))
-		return NULL;
+	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
+		return cachep;
 	return kmem_cache_alloc_node(cachep, flags, node);
 }
 
@@ -3760,7 +3763,7 @@ void kfree(const void *objp)
 	struct kmem_cache *c;
 	unsigned long flags;
 
-	if (unlikely(!objp))
+	if (unlikely(ZERO_OR_NULL_PTR(objp)))
 		return;
 	local_irq_save(flags);
 	kfree_debugcheck(objp);
@@ -4447,7 +4450,7 @@ const struct seq_operations slabstats_op = {
  */
 size_t ksize(const void *objp)
 {
-	if (unlikely(objp == NULL))
+	if (unlikely(ZERO_OR_NULL_PTR(objp)))
 		return 0;
 
 	return obj_size(virt_to_cache(objp));