author     Christoph Lameter <clameter@sgi.com>                      2007-07-17 07:03:22 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>     2007-07-17 13:23:01 -0400
commit     6cb8f91320d3e720351c21741da795fed580b21b (patch)
tree       c9f73c8b82cd0f6c534939b8b9f36e8615b0ab2d /mm
parent     ef2ad80c7d255ed0449eda947c2d700635b7e0f5 (diff)
Slab allocators: consistent ZERO_SIZE_PTR support and NULL result semantics
Define a ZERO_OR_NULL_PTR macro so the individual checks can be removed from the allocators. Move the ZERO_SIZE_PTR related definitions into slab.h.

Make ZERO_SIZE_PTR work for all slab allocators and get rid of the WARN_ON_ONCE(size == 0) that still remains in SLAB.

Make SLUB return NULL, like the other allocators, when too large a memory segment is requested via __kmalloc.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/slab.c  | 13
-rw-r--r--  mm/slob.c  | 11
-rw-r--r--  mm/slub.c  | 29
-rw-r--r--  mm/util.c  |  2
4 files changed, 32 insertions, 23 deletions
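For context, the ZERO_SIZE_PTR machinery itself lands in include/linux/slab.h, which falls outside this mm-only diffstat. A minimal sketch of those definitions, reconstructed here for reference (not part of the hunks below):

/*
 * ZERO_SIZE_PTR: a distinguished non-NULL return value for zero-byte
 * allocations.  It points into the first (never mapped) page, so any
 * dereference faults immediately instead of silently corrupting memory.
 */
#define ZERO_SIZE_PTR ((void *)16)

/*
 * True for NULL and ZERO_SIZE_PTR alike: a single unsigned comparison
 * lets kfree()/ksize() reject both without caring which one they got.
 */
#define ZERO_OR_NULL_PTR(x)	((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)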
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -775,6 +775,9 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
 	 */
 	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
 #endif
+	if (!size)
+		return ZERO_SIZE_PTR;
+
 	while (size > csizep->cs_size)
 		csizep++;
 
@@ -2351,7 +2354,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		 * this should not happen at all.
		 * But leave a BUG_ON for some lucky dude.
 		 */
-		BUG_ON(!cachep->slabp_cache);
+		BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
 	}
 	cachep->ctor = ctor;
 	cachep->name = name;
@@ -3653,8 +3656,8 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 	struct kmem_cache *cachep;
 
 	cachep = kmem_find_general_cachep(size, flags);
-	if (unlikely(cachep == NULL))
-		return NULL;
+	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
+		return cachep;
 	return kmem_cache_alloc_node(cachep, flags, node);
 }
 
@@ -3760,7 +3763,7 @@ void kfree(const void *objp)
 	struct kmem_cache *c;
 	unsigned long flags;
 
-	if (unlikely(!objp))
+	if (unlikely(ZERO_OR_NULL_PTR(objp)))
 		return;
 	local_irq_save(flags);
 	kfree_debugcheck(objp);
@@ -4447,7 +4450,7 @@ const struct seq_operations slabstats_op = {
  */
 size_t ksize(const void *objp)
 {
-	if (unlikely(objp == NULL))
+	if (unlikely(ZERO_OR_NULL_PTR(objp)))
 		return 0;
 
 	return obj_size(virt_to_cache(objp));
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -347,7 +347,7 @@ static void slob_free(void *block, int size)
 	slobidx_t units;
 	unsigned long flags;
 
-	if (!block)
+	if (ZERO_OR_NULL_PTR(block))
 		return;
 	BUG_ON(!size);
 
@@ -424,10 +424,13 @@ out:
 
 void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 {
+	unsigned int *m;
 	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 
 	if (size < PAGE_SIZE - align) {
-		unsigned int *m;
+		if (!size)
+			return ZERO_SIZE_PTR;
+
 		m = slob_alloc(size + align, gfp, align, node);
 		if (m)
 			*m = size;
@@ -450,7 +453,7 @@ void kfree(const void *block)
 {
 	struct slob_page *sp;
 
-	if (!block)
+	if (ZERO_OR_NULL_PTR(block))
 		return;
 
 	sp = (struct slob_page *)virt_to_page(block);
@@ -468,7 +471,7 @@ size_t ksize(const void *block)
 {
 	struct slob_page *sp;
 
-	if (!block)
+	if (ZERO_OR_NULL_PTR(block))
 		return 0;
 
 	sp = (struct slob_page *)virt_to_page(block);
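The *m = size store in the __kmalloc_node hunk above is SLOB stashing the request size in a header word just in front of the object, which ksize() and kfree() later read back. A simplified sketch of that layout (the helper name below is illustrative, not a function in slob.c):

/*
 * SLOB small-object layout (simplified): the allocation is padded by
 * 'align' bytes, and the original request size is stored in the first
 * word so ksize()/kfree() can recover it later.
 *
 *   m -> [ size ][ ...padding... ][ object data returned to caller ]
 *        <-------- align -------->
 */
static void *slob_kmalloc_sketch(size_t size, gfp_t gfp, int node, int align)
{
	unsigned int *m = slob_alloc(size + align, gfp, align, node);

	if (!m)
		return NULL;
	*m = size;			/* header word read back by ksize() */
	return (void *)m + align;	/* caller sees only the object */
}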
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2270,10 +2270,11 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 	int index = kmalloc_index(size);
 
 	if (!index)
-		return NULL;
+		return ZERO_SIZE_PTR;
 
 	/* Allocation too large? */
-	BUG_ON(index < 0);
+	if (index < 0)
+		return NULL;
 
 #ifdef CONFIG_ZONE_DMA
 	if ((flags & SLUB_DMA)) {
@@ -2314,9 +2315,10 @@ void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s = get_slab(size, flags);
 
-	if (s)
-		return slab_alloc(s, flags, -1, __builtin_return_address(0));
-	return ZERO_SIZE_PTR;
+	if (ZERO_OR_NULL_PTR(s))
+		return s;
+
+	return slab_alloc(s, flags, -1, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2325,9 +2327,10 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *s = get_slab(size, flags);
 
-	if (s)
-		return slab_alloc(s, flags, node, __builtin_return_address(0));
-	return ZERO_SIZE_PTR;
+	if (ZERO_OR_NULL_PTR(s))
+		return s;
+
+	return slab_alloc(s, flags, node, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2378,7 +2381,7 @@ void kfree(const void *x)
 	 * this comparison would be true for all "negative" pointers
 	 * (which would cover the whole upper half of the address space).
 	 */
-	if ((unsigned long)x <= (unsigned long)ZERO_SIZE_PTR)
+	if (ZERO_OR_NULL_PTR(x))
 		return;
 
 	page = virt_to_head_page(x);
@@ -2687,8 +2690,8 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 {
 	struct kmem_cache *s = get_slab(size, gfpflags);
 
-	if (!s)
-		return ZERO_SIZE_PTR;
+	if (ZERO_OR_NULL_PTR(s))
+		return s;
 
 	return slab_alloc(s, gfpflags, -1, caller);
 }
@@ -2698,8 +2701,8 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 {
 	struct kmem_cache *s = get_slab(size, gfpflags);
 
-	if (!s)
-		return ZERO_SIZE_PTR;
+	if (ZERO_OR_NULL_PTR(s))
+		return s;
 
 	return slab_alloc(s, gfpflags, node, caller);
 }
--- a/mm/util.c
+++ b/mm/util.c
@@ -76,7 +76,7 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
 
 	if (unlikely(!new_size)) {
 		kfree(p);
-		return NULL;
+		return ZERO_SIZE_PTR;
 	}
 
 	ks = ksize(p);
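Taken together, the hunks give every allocator the same caller-visible contract for zero-length requests. A hedged sketch of that contract as a self-checking demo (the function below is illustrative and not part of the commit):

static void zero_size_semantics_demo(void)
{
	void *p = kmalloc(0, GFP_KERNEL);

	/* Zero-byte allocations now yield ZERO_SIZE_PTR on every allocator. */
	WARN_ON(p != ZERO_SIZE_PTR);
	WARN_ON(ksize(p) != 0);	/* the "zero object" reports size 0 */

	/* krealloc() to zero also hands back ZERO_SIZE_PTR, not NULL. */
	p = krealloc(p, 0, GFP_KERNEL);
	WARN_ON(!ZERO_OR_NULL_PTR(p));

	/* kfree() treats ZERO_SIZE_PTR like NULL: a silent no-op. */
	kfree(p);
}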