author		Christoph Lameter <clameter@sgi.com>	2007-06-08 16:46:49 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-06-08 20:23:33 -0400
commit		272c1d21d6fe42979068e14c04fb60fb6045ad74 (patch)
tree		6a365c67ed8575d15a59aa2183df609368359724 /mm/slub.c
parent		a17627ef8833ac30622a7b39b7be390e1b174405 (diff)
SLUB: return ZERO_SIZE_PTR for kmalloc(0)
Instead of returning the smallest available object, return ZERO_SIZE_PTR.
A ZERO_SIZE_PTR can be legitimately used as an object pointer as long as it
is not dereferenced. Dereferencing a ZERO_SIZE_PTR causes a distinctive
fault. kfree() can handle a ZERO_SIZE_PTR in the same way as NULL.
This enables functions to handle zero-sized allocations uniformly, e.g. with n = number of objects:
	objects = kmalloc(n * sizeof(object), GFP_KERNEL);
	for (i = 0; i < n; i++)
		objects[i].x = y;
	kfree(objects);
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
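To make that contract concrete, here is a minimal userspace sketch of the behaviour described above; the value chosen for ZERO_SIZE_PTR and the mock_kmalloc()/mock_kfree() helpers are illustrative stand-ins, not the kernel API:

	#include <stdio.h>
	#include <stdlib.h>

	#define ZERO_SIZE_PTR ((void *)16)	/* stand-in for the kernel constant */

	static void *mock_kmalloc(size_t size)
	{
		if (size == 0)
			return ZERO_SIZE_PTR;	/* valid pointer value, must not be dereferenced */
		return malloc(size);
	}

	static void mock_kfree(const void *x)
	{
		if ((unsigned long)x <= (unsigned long)ZERO_SIZE_PTR)
			return;			/* covers both NULL and ZERO_SIZE_PTR */
		free((void *)x);
	}

	int main(void)
	{
		size_t n = 0;			/* zero objects is a legal request */
		int *objects = mock_kmalloc(n * sizeof(*objects));

		for (size_t i = 0; i < n; i++)	/* never executes for n == 0 */
			objects[i] = 42;

		mock_kfree(objects);		/* no special case needed by the caller */
		printf("zero-sized allocation handled without special casing\n");
		return 0;
	}

The caller allocates, uses and frees n objects exactly as it would for n > 0; the only rule is that the returned pointer must not be dereferenced when n == 0.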
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	26
1 file changed, 18 insertions, 8 deletions
@@ -2241,7 +2241,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 
 	if (s)
 		return slab_alloc(s, flags, -1, __builtin_return_address(0));
-	return NULL;
+	return ZERO_SIZE_PTR;
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2252,16 +2252,20 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 
 	if (s)
 		return slab_alloc(s, flags, node, __builtin_return_address(0));
-	return NULL;
+	return ZERO_SIZE_PTR;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
 
 size_t ksize(const void *object)
 {
-	struct page *page = get_object_page(object);
+	struct page *page;
 	struct kmem_cache *s;
 
+	if (object == ZERO_SIZE_PTR)
+		return 0;
+
+	page = get_object_page(object);
 	BUG_ON(!page);
 	s = page->slab;
 	BUG_ON(!s);
@@ -2293,7 +2297,13 @@ void kfree(const void *x)
 	struct kmem_cache *s;
 	struct page *page;
 
-	if (!x)
+	/*
+	 * This has to be an unsigned comparison. According to Linus
+	 * some gcc version treat a pointer as a signed entity. Then
+	 * this comparison would be true for all "negative" pointers
+	 * (which would cover the whole upper half of the address space).
+	 */
+	if ((unsigned long)x <= (unsigned long)ZERO_SIZE_PTR)
 		return;
 
 	page = virt_to_head_page(x);
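The signed-versus-unsigned point in that comment can be demonstrated in isolation. The following standalone sketch is not part of the patch; the ZERO_SIZE_PTR definition and helper name are assumptions chosen for illustration:

	#include <assert.h>
	#include <stdbool.h>

	#define ZERO_SIZE_PTR ((void *)16)	/* stand-in for the kernel constant */

	/*
	 * True for both NULL (0) and ZERO_SIZE_PTR. The cast to unsigned long is
	 * the point of the comment above: with a signed comparison, addresses in
	 * the upper half of the address space would appear "negative", compare
	 * below ZERO_SIZE_PTR, and make kfree() silently ignore real objects.
	 */
	static bool zero_or_null_ptr(const void *x)
	{
		return (unsigned long)x <= (unsigned long)ZERO_SIZE_PTR;
	}

	int main(void)
	{
		int real_object;

		assert(zero_or_null_ptr(NULL));
		assert(zero_or_null_ptr(ZERO_SIZE_PTR));
		assert(!zero_or_null_ptr(&real_object));	/* real objects must still be freed */
		return 0;
	}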
@@ -2398,12 +2408,12 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
 	void *ret;
 	size_t ks;
 
-	if (unlikely(!p))
+	if (unlikely(!p || p == ZERO_SIZE_PTR))
 		return kmalloc(new_size, flags);
 
 	if (unlikely(!new_size)) {
 		kfree(p);
-		return NULL;
+		return ZERO_SIZE_PTR;
 	}
 
 	ks = ksize(p);
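Taken together with the kfree() change, krealloc() can now grow from and shrink back to a zero-sized buffer on the normal path. A kernel-style sketch of that round trip (the function itself is hypothetical; only the slab calls come from this patch):

	#include <linux/errno.h>
	#include <linux/slab.h>

	/* Hypothetical example, not part of the patch. */
	static int zero_size_roundtrip(gfp_t flags)
	{
		char *buf = kmalloc(0, flags);		/* returns ZERO_SIZE_PTR, not NULL */

		buf = krealloc(buf, 128, flags);	/* promotes to a real allocation */
		if (!buf)
			return -ENOMEM;

		buf = krealloc(buf, 0, flags);		/* frees and returns ZERO_SIZE_PTR */
		kfree(buf);				/* safe: treated like NULL */
		return 0;
	}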
@@ -2652,7 +2662,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 	struct kmem_cache *s = get_slab(size, gfpflags);
 
 	if (!s)
-		return NULL;
+		return ZERO_SIZE_PTR;
 
 	return slab_alloc(s, gfpflags, -1, caller);
 }
@@ -2663,7 +2673,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	struct kmem_cache *s = get_slab(size, gfpflags);
 
 	if (!s)
-		return NULL;
+		return ZERO_SIZE_PTR;
 
 	return slab_alloc(s, gfpflags, node, caller);
 }