author    Johannes Weiner <hannes@cmpxchg.org>    2009-02-20 18:38:41 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2009-02-20 20:57:48 -0500
commit    3ef0e5ba467366125f04b423f4638baca54a4fc1
tree      cfe7b95c7b19b1d5b2a8534fa9791d6026e49dcd    /include/linux/slab.h
parent    d9190913b71831f5e3d04de62cfb1fd069a9db35
slab: introduce kzfree()
kzfree() is a wrapper for kfree() that additionally zeroes the underlying
memory before releasing it to the slab allocator.

Currently there is code which memset()s the memory region of an object
before releasing it back to the slab allocator to make sure
security-sensitive data are really zeroed out after use.

These callsites can then just use kzfree() which saves some code, makes
users greppable and allows for a stupid destructor that isn't necessarily
aware of the actual object size.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Matt Mackall <mpm@selenic.com>
Acked-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
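The function body lands in mm/util.c and is outside this slab.h-limited
view. A minimal sketch of such a wrapper, assuming the object came from
kmalloc() so that ksize() can recover the full allocation size (which is
why the destructor need not know the object size itself):

	/* Sketch only; the actual implementation lives in mm/util.c
	 * and is not part of this header-only diff. */
	void kzfree(const void *p)
	{
		size_t ks;
		void *mem = (void *)p;	/* memset()/kfree() want non-const */

		if (unlikely(ZERO_OR_NULL_PTR(mem)))
			return;		/* nothing to wipe for NULL/ZERO_SIZE_PTR */
		ks = ksize(mem);	/* full allocation size, not requested size */
		memset(mem, 0, ks);	/* wipe security-sensitive contents */
		kfree(mem);
	}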
Diffstat (limited to 'include/linux/slab.h')
 include/linux/slab.h | 1 +
 1 file changed, 1 insertion(+), 0 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index f96d13c281e8..24c5602bee99 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -127,6 +127,7 @@ int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);
 void * __must_check __krealloc(const void *, size_t, gfp_t);
 void * __must_check krealloc(const void *, size_t, gfp_t);
 void kfree(const void *);
+void kzfree(const void *);
 size_t ksize(const void *);
 
 /*
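With the declaration in place, a callsite that previously wiped a
sensitive buffer by hand collapses to one call; `key` and `key_len` here
are hypothetical names for illustration:

	/* Before: manual wipe, caller must track the object size. */
	memset(key, 0, key_len);
	kfree(key);

	/* After: kzfree() derives the size via ksize() itself. */
	kzfree(key);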