author     Christoph Lameter <clameter@sgi.com>                    2007-06-08 16:46:49 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-06-08 20:23:33 -0400
commit     272c1d21d6fe42979068e14c04fb60fb6045ad74
tree       6a365c67ed8575d15a59aa2183df609368359724
parent     a17627ef8833ac30622a7b39b7be390e1b174405
SLUB: return ZERO_SIZE_PTR for kmalloc(0)
Instead of returning the smallest available object, return ZERO_SIZE_PTR.

A ZERO_SIZE_PTR can be legitimately used as an object pointer as long as it
is not dereferenced. Dereferencing a ZERO_SIZE_PTR causes a distinctive
fault. kfree() can handle a ZERO_SIZE_PTR in the same way as NULL.
This enables functions to operate on zero-sized allocations, e.g. with n
being the number of objects:

	objects = kmalloc(n * sizeof(object));

	for (i = 0; i < n; i++)
		objects[i].x = y;

	kfree(objects);
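
As a minimal userspace sketch of this contract (my_kmalloc() and my_kfree()
are hypothetical stand-ins for the SLUB entry points, not kernel code):

	#include <stdio.h>
	#include <stdlib.h>

	#define ZERO_SIZE_PTR ((void *)16)

	/* Stand-in for kmalloc(): zero-sized requests yield ZERO_SIZE_PTR. */
	static void *my_kmalloc(size_t size)
	{
		return size ? malloc(size) : ZERO_SIZE_PTR;
	}

	/* Stand-in for kfree(): NULL and ZERO_SIZE_PTR are both no-ops. */
	static void my_kfree(void *p)
	{
		if ((unsigned long)p <= (unsigned long)ZERO_SIZE_PTR)
			return;
		free(p);
	}

	int main(void)
	{
		size_t n = 0;			/* zero objects */
		int *objects = my_kmalloc(n * sizeof(*objects));

		for (size_t i = 0; i < n; i++)	/* never runs for n == 0 */
			objects[i] = 1;

		my_kfree(objects);		/* safe: no-op for ZERO_SIZE_PTR */
		printf("zero-size allocation returned %p\n", (void *)objects);
		return 0;
	}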
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  include/linux/slub_def.h | 25
-rw-r--r--  mm/slub.c                | 26
2 files changed, 35 insertions, 16 deletions
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 0764c829d967..a0ad37463d62 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -70,11 +70,8 @@ extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
  */
 static inline int kmalloc_index(size_t size)
 {
-	/*
-	 * We should return 0 if size == 0 but we use the smallest object
-	 * here for SLAB legacy reasons.
-	 */
-	WARN_ON_ONCE(size == 0);
+	if (!size)
+		return 0;
 
 	if (size > KMALLOC_MAX_SIZE)
		return -1;
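
For illustration, a self-contained userspace model of the new index
behavior; model_kmalloc_index() is a hypothetical, heavily truncated
analogue of the real kmalloc_index(), which covers many more sizes:

	#include <stdio.h>

	/* Size 0 now maps to index 0 ("no cache") instead of tripping
	 * WARN_ON_ONCE() and falling through to the smallest cache. */
	static int model_kmalloc_index(unsigned long size)
	{
		if (!size)
			return 0;	/* caller turns this into ZERO_SIZE_PTR */
		if (size <= 8)
			return 3;	/* 8-byte cache is index 3 (2^3) */
		if (size <= 16)
			return 4;
		if (size <= 32)
			return 5;
		return -1;		/* larger sizes elided in this model */
	}

	int main(void)
	{
		printf("index(0)  = %d\n", model_kmalloc_index(0));
		printf("index(24) = %d\n", model_kmalloc_index(24));
		return 0;
	}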
@@ -153,13 +150,25 @@ static inline struct kmem_cache *kmalloc_slab(size_t size)
 #define SLUB_DMA 0
 #endif
 
+
+/*
+ * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
+ *
+ * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
+ *
+ * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
+ * Both make kfree a no-op.
+ */
+#define ZERO_SIZE_PTR ((void *)16)
+
+
 static inline void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
 		struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
-			return NULL;
+			return ZERO_SIZE_PTR;
 
 		return kmem_cache_alloc(s, flags);
 	} else
@@ -172,7 +181,7 @@ static inline void *kzalloc(size_t size, gfp_t flags)
 		struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
-			return NULL;
+			return ZERO_SIZE_PTR;
 
 		return kmem_cache_zalloc(s, flags);
 	} else
@@ -188,7 +197,7 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 		struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
-			return NULL;
+			return ZERO_SIZE_PTR;
 
 		return kmem_cache_alloc_node(s, flags, node);
 	} else
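
The choice of 16 places ZERO_SIZE_PTR inside the first page, which is
normally never mapped, so a dereference faults at an address that is
non-NULL yet clearly invalid. A userspace sketch that crashes on purpose
to show the distinct fault address (assumes a typical configuration where
page 0 is unmapped; run under a debugger to see the faulting address):

	#include <stdio.h>

	#define ZERO_SIZE_PTR ((void *)16)

	int main(void)
	{
		volatile char *p = ZERO_SIZE_PTR;

		printf("dereferencing %p...\n", (void *)p);
		return *p;	/* SIGSEGV with fault address 0x10, not 0 */
	}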
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2241,7 +2241,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 
 	if (s)
 		return slab_alloc(s, flags, -1, __builtin_return_address(0));
-	return NULL;
+	return ZERO_SIZE_PTR;
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2252,16 +2252,20 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 
 	if (s)
 		return slab_alloc(s, flags, node, __builtin_return_address(0));
-	return NULL;
+	return ZERO_SIZE_PTR;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
 
 size_t ksize(const void *object)
 {
-	struct page *page = get_object_page(object);
+	struct page *page;
 	struct kmem_cache *s;
 
+	if (object == ZERO_SIZE_PTR)
+		return 0;
+
+	page = get_object_page(object);
 	BUG_ON(!page);
 	s = page->slab;
 	BUG_ON(!s);
@@ -2293,7 +2297,13 @@ void kfree(const void *x)
 	struct kmem_cache *s;
 	struct page *page;
 
-	if (!x)
+	/*
+	 * This has to be an unsigned comparison. According to Linus,
+	 * some gcc versions treat a pointer as a signed entity. Then
+	 * this comparison would be true for all "negative" pointers
+	 * (which would cover the whole upper half of the address space).
+	 */
+	if ((unsigned long)x <= (unsigned long)ZERO_SIZE_PTR)
 		return;
 
 	page = virt_to_head_page(x);
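
For illustration, a userspace sketch of why this comparison must be
unsigned; the high address below is a made-up example of the kind of
"negative" pointer the comment warns about (assumes 64-bit long):

	#include <stdio.h>

	#define ZERO_SIZE_PTR_VAL 16UL

	int main(void)
	{
		/* Made-up high address, as kernel pointers typically are. */
		unsigned long high = 0xffff880012345678UL;

		/* Signed view: the value is negative, so it wrongly
		 * compares <= 16 and a live object would be leaked. */
		printf("signed:   %d\n", (long)high <= (long)ZERO_SIZE_PTR_VAL);

		/* Unsigned view: the value is huge, so the early return
		 * is skipped and the object is freed normally. */
		printf("unsigned: %d\n", high <= ZERO_SIZE_PTR_VAL);
		return 0;
	}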
@@ -2398,12 +2408,12 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
 	void *ret;
 	size_t ks;
 
-	if (unlikely(!p))
+	if (unlikely(!p || p == ZERO_SIZE_PTR))
 		return kmalloc(new_size, flags);
 
 	if (unlikely(!new_size)) {
 		kfree(p);
-		return NULL;
+		return ZERO_SIZE_PTR;
 	}
 
 	ks = ksize(p);
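
A userspace sketch of the resulting krealloc() behavior; my_krealloc()
and my_kmalloc() are hypothetical malloc()-based stand-ins:

	#include <stdlib.h>

	#define ZERO_SIZE_PTR ((void *)16)

	static void *my_kmalloc(size_t size)
	{
		return size ? malloc(size) : ZERO_SIZE_PTR;
	}

	static void *my_krealloc(void *p, size_t new_size)
	{
		if (!p || p == ZERO_SIZE_PTR)	/* no old data to preserve */
			return my_kmalloc(new_size);

		if (!new_size) {		/* shrinking to zero frees p */
			free(p);
			return ZERO_SIZE_PTR;
		}
		return realloc(p, new_size);
	}

Either way the caller ends up holding a value that a later kfree()-style
call accepts, whether it is ZERO_SIZE_PTR or a live allocation.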
@@ -2652,7 +2662,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 	struct kmem_cache *s = get_slab(size, gfpflags);
 
 	if (!s)
-		return NULL;
+		return ZERO_SIZE_PTR;
 
 	return slab_alloc(s, gfpflags, -1, caller);
 }
@@ -2663,7 +2673,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	struct kmem_cache *s = get_slab(size, gfpflags);
 
 	if (!s)
-		return NULL;
+		return ZERO_SIZE_PTR;
 
 	return slab_alloc(s, gfpflags, node, caller);
 }