author		Christoph Lameter <clameter@sgi.com>	2007-07-17 07:03:22 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-17 13:23:01 -0400
commit		6cb8f91320d3e720351c21741da795fed580b21b (patch)
tree		c9f73c8b82cd0f6c534939b8b9f36e8615b0ab2d
parent		ef2ad80c7d255ed0449eda947c2d700635b7e0f5 (diff)
Slab allocators: consistent ZERO_SIZE_PTR support and NULL result semantics
Define ZERO_OR_NULL_PTR macro to be able to remove the checks from the
allocators. Move ZERO_SIZE_PTR related stuff into slab.h.

Make ZERO_SIZE_PTR work for all slab allocators and get rid of the
WARN_ON_ONCE(size == 0) that is still remaining in SLAB.

Make slub return NULL like the other allocators if a too large memory
segment is requested via __kmalloc.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
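Illustration (not part of the commit): a minimal userspace-only sketch of the semantics described above. ZERO_SIZE_PTR and ZERO_OR_NULL_PTR are copied verbatim from the patch; toy_kmalloc() and toy_kfree() are hypothetical stand-ins for the kernel allocators, used only to show how a caller observes the new zero-size and too-large behaviour.

/* Sketch only: mimics the patch's semantics with libc malloc/free. */
#include <stdio.h>
#include <stdlib.h>

#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) < \
				(unsigned long)ZERO_SIZE_PTR)

#define TOO_LARGE (1UL << 30)	/* arbitrary "too large" cut-off for this toy */

static void *toy_kmalloc(size_t size)
{
	if (!size)
		return ZERO_SIZE_PTR;	/* zero-sized request: distinct non-NULL pointer */
	if (size >= TOO_LARGE)
		return NULL;		/* oversized request: NULL, like the other allocators */
	return malloc(size);
}

static void toy_kfree(void *p)
{
	if (ZERO_OR_NULL_PTR(p))	/* covers both NULL and ZERO_SIZE_PTR */
		return;
	free(p);
}

int main(void)
{
	void *a = toy_kmalloc(0);
	void *b = toy_kmalloc(TOO_LARGE);
	void *c = toy_kmalloc(32);

	printf("size 0  -> %p (ZERO_OR_NULL_PTR: %d)\n", a, ZERO_OR_NULL_PTR(a));
	printf("too big -> %p (ZERO_OR_NULL_PTR: %d)\n", b, ZERO_OR_NULL_PTR(b));
	printf("size 32 -> %p (ZERO_OR_NULL_PTR: %d)\n", c, ZERO_OR_NULL_PTR(c));

	toy_kfree(a);	/* no-op */
	toy_kfree(b);	/* no-op */
	toy_kfree(c);	/* real free */
	return 0;
}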
-rw-r--r--	include/linux/slab.h		13
-rw-r--r--	include/linux/slab_def.h	12
-rw-r--r--	include/linux/slub_def.h	12
-rw-r--r--	mm/slab.c			13
-rw-r--r--	mm/slob.c			11
-rw-r--r--	mm/slub.c			29
-rw-r--r--	mm/util.c			 2
7 files changed, 57 insertions(+), 35 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 27402fea9b79..0289ec89300a 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -31,6 +31,19 @@
 #define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */
 
 /*
+ * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
+ *
+ * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
+ *
+ * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
+ * Both make kfree a no-op.
+ */
+#define ZERO_SIZE_PTR ((void *)16)
+
+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) < \
+				(unsigned long)ZERO_SIZE_PTR)
+
+/*
  * struct kmem_cache related prototypes
  */
 void __init kmem_cache_init(void);
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 365d036c454a..16e814ffab8d 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -32,6 +32,10 @@ static inline void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size)) {
 		int i = 0;
+
+		if (!size)
+			return ZERO_SIZE_PTR;
+
 #define CACHE(x) \
 		if (size <= x) \
 			goto found; \
@@ -58,6 +62,10 @@ static inline void *kzalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size)) {
 		int i = 0;
+
+		if (!size)
+			return ZERO_SIZE_PTR;
+
 #define CACHE(x) \
 		if (size <= x) \
 			goto found; \
@@ -88,6 +96,10 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	if (__builtin_constant_p(size)) {
 		int i = 0;
+
+		if (!size)
+			return ZERO_SIZE_PTR;
+
 #define CACHE(x) \
 		if (size <= x) \
 			goto found; \
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index a582f6771525..579b0a22858e 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -159,18 +159,6 @@ static inline struct kmem_cache *kmalloc_slab(size_t size)
 #define SLUB_DMA 0
 #endif
 
-
-/*
- * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
- *
- * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
- *
- * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
- * Both make kfree a no-op.
- */
-#define ZERO_SIZE_PTR ((void *)16)
-
-
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
diff --git a/mm/slab.c b/mm/slab.c
index 4bd8a53091b7..d2cd304fd8af 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -775,6 +775,9 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
 	 */
 	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
 #endif
+	if (!size)
+		return ZERO_SIZE_PTR;
+
 	while (size > csizep->cs_size)
 		csizep++;
 
@@ -2351,7 +2354,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		 * this should not happen at all.
 		 * But leave a BUG_ON for some lucky dude.
 		 */
-		BUG_ON(!cachep->slabp_cache);
+		BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
 	}
 	cachep->ctor = ctor;
 	cachep->name = name;
@@ -3653,8 +3656,8 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 	struct kmem_cache *cachep;
 
 	cachep = kmem_find_general_cachep(size, flags);
-	if (unlikely(cachep == NULL))
-		return NULL;
+	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
+		return cachep;
 	return kmem_cache_alloc_node(cachep, flags, node);
 }
 
@@ -3760,7 +3763,7 @@ void kfree(const void *objp)
 	struct kmem_cache *c;
 	unsigned long flags;
 
-	if (unlikely(!objp))
+	if (unlikely(ZERO_OR_NULL_PTR(objp)))
 		return;
 	local_irq_save(flags);
 	kfree_debugcheck(objp);
@@ -4447,7 +4450,7 @@ const struct seq_operations slabstats_op = {
  */
 size_t ksize(const void *objp)
 {
-	if (unlikely(objp == NULL))
+	if (unlikely(ZERO_OR_NULL_PTR(objp)))
 		return 0;
 
 	return obj_size(virt_to_cache(objp));
diff --git a/mm/slob.c b/mm/slob.c
index 154e7bdf3544..41d32c3c0be4 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -347,7 +347,7 @@ static void slob_free(void *block, int size)
 	slobidx_t units;
 	unsigned long flags;
 
-	if (!block)
+	if (ZERO_OR_NULL_PTR(block))
 		return;
 	BUG_ON(!size);
 
@@ -424,10 +424,13 @@ out:
 
 void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 {
+	unsigned int *m;
 	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 
 	if (size < PAGE_SIZE - align) {
-		unsigned int *m;
+		if (!size)
+			return ZERO_SIZE_PTR;
+
 		m = slob_alloc(size + align, gfp, align, node);
 		if (m)
 			*m = size;
@@ -450,7 +453,7 @@ void kfree(const void *block)
 {
 	struct slob_page *sp;
 
-	if (!block)
+	if (ZERO_OR_NULL_PTR(block))
 		return;
 
 	sp = (struct slob_page *)virt_to_page(block);
@@ -468,7 +471,7 @@ size_t ksize(const void *block)
 {
 	struct slob_page *sp;
 
-	if (!block)
+	if (ZERO_OR_NULL_PTR(block))
 		return 0;
 
 	sp = (struct slob_page *)virt_to_page(block);
diff --git a/mm/slub.c b/mm/slub.c
index 1b0a95d75dbb..548d78df81e1 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2270,10 +2270,11 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 	int index = kmalloc_index(size);
 
 	if (!index)
-		return NULL;
+		return ZERO_SIZE_PTR;
 
 	/* Allocation too large? */
-	BUG_ON(index < 0);
+	if (index < 0)
+		return NULL;
 
 #ifdef CONFIG_ZONE_DMA
 	if ((flags & SLUB_DMA)) {
@@ -2314,9 +2315,10 @@ void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s = get_slab(size, flags);
 
-	if (s)
-		return slab_alloc(s, flags, -1, __builtin_return_address(0));
-	return ZERO_SIZE_PTR;
+	if (ZERO_OR_NULL_PTR(s))
+		return s;
+
+	return slab_alloc(s, flags, -1, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2325,9 +2327,10 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *s = get_slab(size, flags);
 
-	if (s)
-		return slab_alloc(s, flags, node, __builtin_return_address(0));
-	return ZERO_SIZE_PTR;
+	if (ZERO_OR_NULL_PTR(s))
+		return s;
+
+	return slab_alloc(s, flags, node, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2378,7 +2381,7 @@ void kfree(const void *x)
 	 * this comparison would be true for all "negative" pointers
 	 * (which would cover the whole upper half of the address space).
 	 */
-	if ((unsigned long)x <= (unsigned long)ZERO_SIZE_PTR)
+	if (ZERO_OR_NULL_PTR(x))
 		return;
 
 	page = virt_to_head_page(x);
@@ -2687,8 +2690,8 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 {
 	struct kmem_cache *s = get_slab(size, gfpflags);
 
-	if (!s)
-		return ZERO_SIZE_PTR;
+	if (ZERO_OR_NULL_PTR(s))
+		return s;
 
 	return slab_alloc(s, gfpflags, -1, caller);
 }
@@ -2698,8 +2701,8 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 {
 	struct kmem_cache *s = get_slab(size, gfpflags);
 
-	if (!s)
-		return ZERO_SIZE_PTR;
+	if (ZERO_OR_NULL_PTR(s))
+		return s;
 
 	return slab_alloc(s, gfpflags, node, caller);
 }
diff --git a/mm/util.c b/mm/util.c
index 18396ea63ee6..f2f21b775516 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -76,7 +76,7 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
 
 	if (unlikely(!new_size)) {
 		kfree(p);
-		return NULL;
+		return ZERO_SIZE_PTR;
 	}
 
 	ks = ksize(p);