author | Satyam Sharma <satyam@infradead.org> | 2007-10-16 04:24:44 -0400
---|---|---
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-10-16 12:42:53 -0400
commit | 2408c55037c3f7d51a8a100025c47595e71b838c |
tree | 71940f72dbadf6a6744ffbd062ef4fd8754aa623 |
parent | c92ff1bde06f69d59b40f3194016aee51cc5da55 |
{slub, slob}: use unlikely() for kfree(ZERO_OR_NULL_PTR) check
kfree(NULL) would normally occur only in error paths, and kfree(ZERO_SIZE_PTR)
is uncommon as well, so use unlikely() for the condition check in SLUB's and
SLOB's kfree() to optimize for the common case. SLAB already does this.
Signed-off-by: Satyam Sharma <satyam@infradead.org>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
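For context, unlikely() is the kernel's branch-prediction hint, and ZERO_OR_NULL_PTR() folds the NULL and ZERO_SIZE_PTR checks into a single comparison. A minimal sketch of both, modeled on the era's include/linux/compiler.h and include/linux/slab.h (simplified, not the complete headers):

```c
/* Branch-prediction hints: tell GCC which way a branch is expected
 * to go, so the common path is laid out as the straight-line
 * (fall-through) code. Modeled on include/linux/compiler.h. */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

/* kmalloc(0) returns ZERO_SIZE_PTR, a distinct non-NULL value that
 * must never be dereferenced; kfree() and ksize() accept it as well
 * as NULL. Modeled on include/linux/slab.h. */
#define ZERO_SIZE_PTR ((void *)16)
#define ZERO_OR_NULL_PTR(x) \
	((unsigned long)(x) <= (unsigned long)ZERO_SIZE_PTR)
```

With the hint in place, GCC can move the early-return branch out of line, so the typical kfree() of a real allocation falls straight through to the actual free logic.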
-rw-r--r-- | mm/slob.c | 6
-rw-r--r-- | mm/slub.c | 8
2 files changed, 7 insertions, 7 deletions
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -360,7 +360,7 @@ static void slob_free(void *block, int size)
 	slobidx_t units;
 	unsigned long flags;
 
-	if (ZERO_OR_NULL_PTR(block))
+	if (unlikely(ZERO_OR_NULL_PTR(block)))
 		return;
 	BUG_ON(!size);
 
@@ -466,7 +466,7 @@ void kfree(const void *block)
 {
 	struct slob_page *sp;
 
-	if (ZERO_OR_NULL_PTR(block))
+	if (unlikely(ZERO_OR_NULL_PTR(block)))
 		return;
 
 	sp = (struct slob_page *)virt_to_page(block);
@@ -484,7 +484,7 @@ size_t ksize(const void *block)
 {
 	struct slob_page *sp;
 
-	if (ZERO_OR_NULL_PTR(block))
+	if (unlikely(ZERO_OR_NULL_PTR(block)))
 		return 0;
 
 	sp = (struct slob_page *)virt_to_page(block);
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2449,7 +2449,7 @@ size_t ksize(const void *object)
 	struct page *page;
 	struct kmem_cache *s;
 
-	if (ZERO_OR_NULL_PTR(object))
+	if (unlikely(ZERO_OR_NULL_PTR(object)))
 		return 0;
 
 	page = get_object_page(object);
@@ -2483,7 +2483,7 @@ void kfree(const void *x)
 {
 	struct page *page;
 
-	if (ZERO_OR_NULL_PTR(x))
+	if (unlikely(ZERO_OR_NULL_PTR(x)))
 		return;
 
 	page = virt_to_head_page(x);
@@ -2800,7 +2800,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 						get_order(size));
 	s = get_slab(size, gfpflags);
 
-	if (ZERO_OR_NULL_PTR(s))
+	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
 	return slab_alloc(s, gfpflags, -1, caller);
@@ -2816,7 +2816,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 						get_order(size));
 	s = get_slab(size, gfpflags);
 
-	if (ZERO_OR_NULL_PTR(s))
+	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
 	return slab_alloc(s, gfpflags, node, caller);
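To illustrate the commit message's claim that kfree(NULL) normally shows up only in error paths, here is a hypothetical caller (example_setup() and its buffers are illustrative, not part of this patch): a failed allocation unwinds by freeing whatever was obtained so far, relying on kfree(NULL) being a no-op. This is the rare case the unlikely() hint moves off the common path.

```c
#include <linux/slab.h>
#include <linux/errno.h>

/* Hypothetical error-unwind pattern: if either allocation fails,
 * the cleanup calls may pass NULL to kfree(), which is defined to
 * be a no-op -- the uncommon case that unlikely() deprioritizes. */
static int example_setup(void **out)
{
	void *hdr = kmalloc(64, GFP_KERNEL);
	void *payload = kmalloc(512, GFP_KERNEL);

	if (!hdr || !payload) {
		kfree(payload);		/* may be NULL here */
		kfree(hdr);		/* may be NULL here */
		return -ENOMEM;
	}

	/* ... normal operation would use hdr and payload ... */
	*out = payload;
	kfree(hdr);			/* common case: a real pointer */
	return 0;
}
```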