diff options
author | Christoph Lameter <clameter@sgi.com> | 2007-07-17 07:03:22 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-07-17 13:23:01 -0400 |
commit | 6cb8f91320d3e720351c21741da795fed580b21b (patch) | |
tree | c9f73c8b82cd0f6c534939b8b9f36e8615b0ab2d /mm/slob.c | |
parent | ef2ad80c7d255ed0449eda947c2d700635b7e0f5 (diff) |
Slab allocators: consistent ZERO_SIZE_PTR support and NULL result semantics
Define ZERO_OR_NULL_PTR macro to be able to remove the checks from the
allocators. Move ZERO_SIZE_PTR related stuff into slab.h.
Make ZERO_SIZE_PTR work for all slab allocators and get rid of the
WARN_ON_ONCE(size == 0) that still remains in SLAB.
Make slub return NULL like the other allocators if too large a memory segment
is requested via __kmalloc.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slob.c')
-rw-r--r-- | mm/slob.c | 11 |
1 file changed, 7 insertions, 4 deletions
@@ -347,7 +347,7 @@ static void slob_free(void *block, int size) | |||
347 | slobidx_t units; | 347 | slobidx_t units; |
348 | unsigned long flags; | 348 | unsigned long flags; |
349 | 349 | ||
350 | if (!block) | 350 | if (ZERO_OR_NULL_PTR(block)) |
351 | return; | 351 | return; |
352 | BUG_ON(!size); | 352 | BUG_ON(!size); |
353 | 353 | ||
@@ -424,10 +424,13 @@ out: | |||
424 | 424 | ||
425 | void *__kmalloc_node(size_t size, gfp_t gfp, int node) | 425 | void *__kmalloc_node(size_t size, gfp_t gfp, int node) |
426 | { | 426 | { |
427 | unsigned int *m; | ||
427 | int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); | 428 | int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); |
428 | 429 | ||
429 | if (size < PAGE_SIZE - align) { | 430 | if (size < PAGE_SIZE - align) { |
430 | unsigned int *m; | 431 | if (!size) |
432 | return ZERO_SIZE_PTR; | ||
433 | |||
431 | m = slob_alloc(size + align, gfp, align, node); | 434 | m = slob_alloc(size + align, gfp, align, node); |
432 | if (m) | 435 | if (m) |
433 | *m = size; | 436 | *m = size; |
@@ -450,7 +453,7 @@ void kfree(const void *block) | |||
450 | { | 453 | { |
451 | struct slob_page *sp; | 454 | struct slob_page *sp; |
452 | 455 | ||
453 | if (!block) | 456 | if (ZERO_OR_NULL_PTR(block)) |
454 | return; | 457 | return; |
455 | 458 | ||
456 | sp = (struct slob_page *)virt_to_page(block); | 459 | sp = (struct slob_page *)virt_to_page(block); |
@@ -468,7 +471,7 @@ size_t ksize(const void *block) | |||
468 | { | 471 | { |
469 | struct slob_page *sp; | 472 | struct slob_page *sp; |
470 | 473 | ||
471 | if (!block) | 474 | if (ZERO_OR_NULL_PTR(block)) |
472 | return 0; | 475 | return 0; |
473 | 476 | ||
474 | sp = (struct slob_page *)virt_to_page(block); | 477 | sp = (struct slob_page *)virt_to_page(block); |