| author | Christoph Lameter <clameter@sgi.com> | 2007-06-08 16:46:49 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-06-08 20:23:33 -0400 |
| commit | 272c1d21d6fe42979068e14c04fb60fb6045ad74 (patch) | |
| tree | 6a365c67ed8575d15a59aa2183df609368359724 /include/linux | |
| parent | a17627ef8833ac30622a7b39b7be390e1b174405 (diff) | |
SLUB: return ZERO_SIZE_PTR for kmalloc(0)
Instead of returning the smallest available object, return ZERO_SIZE_PTR for a zero-size request.
A ZERO_SIZE_PTR can be legitimately used as an object pointer as long as it is not dereferenced; dereferencing ZERO_SIZE_PTR causes a distinctive fault. kfree() can handle a ZERO_SIZE_PTR in the same way as NULL.
This enables functions to cope with zero-sized allocations, e.g. when n, the number of objects, may be zero:
	objects = kmalloc(n * sizeof(*objects), GFP_KERNEL);
	for (i = 0; i < n; i++)
		objects[i].x = y;
	kfree(objects);
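The kfree() side of this change lives in mm/slub.c and is not shown here (the diffstat below is limited to include/linux). A minimal sketch of the idea, assuming a hypothetical kfree_sketch() helper rather than the real implementation:

	/*
	 * Illustration only: treat ZERO_SIZE_PTR exactly like NULL.
	 * (void *)16 points into the never-mapped first page, so an
	 * accidental dereference faults distinctly instead of silently
	 * corrupting a small slab object.
	 */
	#define ZERO_SIZE_PTR ((void *)16)

	void kfree_sketch(const void *p)
	{
		if (p == NULL || p == ZERO_SIZE_PTR)
			return;		/* nothing was ever allocated */

		/* ... look up the slab and release the object ... */
	}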
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/slub_def.h	25
1 file changed, 17 insertions(+), 8 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 0764c829d967..a0ad37463d62 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -70,11 +70,8 @@ extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
  */
 static inline int kmalloc_index(size_t size)
 {
-	/*
-	 * We should return 0 if size == 0 but we use the smallest object
-	 * here for SLAB legacy reasons.
-	 */
-	WARN_ON_ONCE(size == 0);
+	if (!size)
+		return 0;
 
 	if (size > KMALLOC_MAX_SIZE)
 		return -1;
@@ -153,13 +150,25 @@ static inline struct kmem_cache *kmalloc_slab(size_t size)
 #define SLUB_DMA 0
 #endif
 
+
+/*
+ * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
+ *
+ * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
+ *
+ * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
+ * Both make kfree a no-op.
+ */
+#define ZERO_SIZE_PTR ((void *)16)
+
+
 static inline void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
 		struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
-			return NULL;
+			return ZERO_SIZE_PTR;
 
 		return kmem_cache_alloc(s, flags);
 	} else
@@ -172,7 +181,7 @@ static inline void *kzalloc(size_t size, gfp_t flags)
 		struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
-			return NULL;
+			return ZERO_SIZE_PTR;
 
 		return kmem_cache_zalloc(s, flags);
 	} else
@@ -188,7 +197,7 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 		struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
-			return NULL;
+			return ZERO_SIZE_PTR;
 
 		return kmem_cache_alloc_node(s, flags, node);
 	} else
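For context, a hedged usage sketch (hypothetical caller code, not part of this patch): with the change above, a zero-count allocation returns ZERO_SIZE_PTR instead of the smallest slab object, the loop body never runs, and kfree() accepts the pointer just like NULL.

	#include <linux/slab.h>
	#include <linux/errno.h>

	struct item {
		int x;
	};

	/* Hypothetical helper: works for n == 0 as well as n > 0. */
	static int setup_items(unsigned int n, int y)
	{
		struct item *items;
		unsigned int i;

		items = kmalloc(n * sizeof(*items), GFP_KERNEL);
		if (!items)
			return -ENOMEM;	/* ZERO_SIZE_PTR is non-NULL, so n == 0 still passes */

		for (i = 0; i < n; i++)
			items[i].x = y;	/* never reached when n == 0 */

		kfree(items);		/* no-op for ZERO_SIZE_PTR, same as for NULL */
		return 0;
	}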