author     Christoph Lameter <clameter@sgi.com>                    2007-10-16 04:24:46 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-10-16 12:42:53 -0400
commit     ef8b4520bd9f8294ffce9abd6158085bde5dc902
tree       c099a16691ac06208f4d3d65b71e7adaf7361fcd /mm
parent     0da7e01f5f37f441cccd7c8c0586e06db0981907
Slab allocators: fail if ksize is called with a NULL parameter
A NULL pointer means that the object was not allocated.  One cannot
determine the size of an object that has not been allocated.  Currently
we return 0 but we really should BUG() on attempts to determine the size
of something nonexistent.

krealloc() interprets NULL to mean a zero sized object.  Handle that
separately in krealloc().

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Matt Mackall <mpm@selenic.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
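For illustration only, the contract this patch establishes can be sketched in
userspace C.  The names below (toy_ksize, toy_krealloc) are hypothetical, the
ZERO_SIZE_PTR and BUG_ON macros are stand-ins for the kernel definitions, and
glibc's malloc_usable_size() substitutes for the per-allocator size lookup; the
sketch only mirrors the behaviour described above: ksize(NULL) is a bug,
ksize(ZERO_SIZE_PTR) is 0, and krealloc() filters out NULL itself before
calling ksize().

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>		/* malloc_usable_size() -- glibc-specific stand-in */

#define ZERO_SIZE_PTR	((void *)16)	/* marker returned for zero-byte allocations */
#define BUG_ON(cond)	assert(!(cond))	/* the kernel would oops; here we just assert */

/* Toy ksize(): NULL is a bug, ZERO_SIZE_PTR reports 0, anything else is measured. */
static size_t toy_ksize(const void *objp)
{
	BUG_ON(!objp);			/* asking the size of NULL is now a bug */
	if (objp == ZERO_SIZE_PTR)
		return 0;		/* a zero-sized "object" occupies nothing */
	return malloc_usable_size((void *)objp);
}

/* Toy krealloc(): handles NULL itself instead of passing it down to ksize(). */
static void *toy_krealloc(const void *p, size_t new_size)
{
	size_t ks = 0;

	if (new_size == 0) {
		if (p != ZERO_SIZE_PTR)
			free((void *)p);
		return ZERO_SIZE_PTR;	/* shrinking to zero hands back the marker */
	}

	if (p)
		ks = toy_ksize(p);	/* ksize() copes with ZERO_SIZE_PTR, not with NULL */

	if (ks >= new_size)
		return (void *)p;	/* current object is already big enough */

	return realloc(p == ZERO_SIZE_PTR ? NULL : (void *)p, new_size);
}

int main(void)
{
	void *p = toy_krealloc(NULL, 32);	/* NULL grows like a fresh allocation */
	printf("usable size: %zu\n", toy_ksize(p));

	p = toy_krealloc(p, 0);			/* shrink to zero: ZERO_SIZE_PTR comes back */
	printf("zero-size object reports %zu bytes\n", toy_ksize(p));

	/* toy_ksize(NULL) would now trip BUG_ON -- the point of this patch. */
	return 0;
}

Built with gcc against glibc this prints a usable size of at least 32 and then
0; the assert stands in for the kernel BUG() that the real ksize() now raises.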
Diffstat (limited to 'mm')
-rw-r--r--   mm/slab.c   3
-rw-r--r--   mm/slob.c   3
-rw-r--r--   mm/slub.c   3
-rw-r--r--   mm/util.c   6
4 files changed, 10 insertions(+), 5 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 6f6abef83a1a..1b240a3029d6 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4446,7 +4446,8 @@ const struct seq_operations slabstats_op = {
  */
 size_t ksize(const void *objp)
 {
-	if (unlikely(ZERO_OR_NULL_PTR(objp)))
+	BUG_ON(!objp);
+	if (unlikely(objp == ZERO_SIZE_PTR))
 		return 0;
 
 	return obj_size(virt_to_cache(objp));
diff --git a/mm/slob.c b/mm/slob.c
index a886e83e17ef..de5d5563a46c 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -484,7 +484,8 @@ size_t ksize(const void *block)
 {
 	struct slob_page *sp;
 
-	if (unlikely(ZERO_OR_NULL_PTR(block)))
+	BUG_ON(!block);
+	if (unlikely(block == ZERO_SIZE_PTR))
 		return 0;
 
 	sp = (struct slob_page *)virt_to_page(block);
diff --git a/mm/slub.c b/mm/slub.c
index b7d3664fa3a9..d7c044dbd157 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2449,7 +2449,8 @@ size_t ksize(const void *object)
 	struct page *page;
 	struct kmem_cache *s;
 
-	if (unlikely(ZERO_OR_NULL_PTR(object)))
+	BUG_ON(!object);
+	if (unlikely(object == ZERO_SIZE_PTR))
 		return 0;
 
 	page = get_object_page(object);
diff --git a/mm/util.c b/mm/util.c
index bf340d806868..5f64026cbb4d 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -81,14 +81,16 @@ EXPORT_SYMBOL(kmemdup);
 void *krealloc(const void *p, size_t new_size, gfp_t flags)
 {
 	void *ret;
-	size_t ks;
+	size_t ks = 0;
 
 	if (unlikely(!new_size)) {
 		kfree(p);
 		return ZERO_SIZE_PTR;
 	}
 
-	ks = ksize(p);
+	if (p)
+		ks = ksize(p);
+
 	if (ks >= new_size)
 		return (void *)p;
 