path: root/mm/slab.c
author		Pekka Enberg <penberg@cs.helsinki.fi>	2007-05-06 17:48:40 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-07 15:12:50 -0400
commit		fd76bab2fa6d8f3ef6b326a4c6ae442fa21d30a4 (patch)
tree		66f310ab9d7cdadfb79486700f1e01df7923ec14 /mm/slab.c
parent		e3ebadd95cb621e2c7436f3d3646447ac9d5c16d (diff)
slab: introduce krealloc
This introduces krealloc(), which reallocates memory while keeping the contents unchanged. The allocator avoids reallocation if the new size still fits the currently used cache. I also added a simple non-optimized version for mm/slob.c for compatibility.

[akpm@linux-foundation.org: fix warnings]
Acked-by: Josef Sipek <jsipek@fsl.cs.sunysb.edu>
Acked-by: Matt Mackall <mpm@selenic.com>
Acked-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
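As a usage sketch (not part of this patch; the helper name and error handling are illustrative), a caller that wants to grow a buffer while keeping its contents could wrap the new call like this:

#include <linux/errno.h>
#include <linux/slab.h>

/* Hypothetical helper: resize *buf to new_len, preserving its contents. */
static int resize_buffer(char **buf, size_t new_len, gfp_t flags)
{
	char *tmp;

	tmp = krealloc(*buf, new_len, flags);
	if (!tmp)
		return -ENOMEM;	/* the old allocation is left untouched */

	*buf = tmp;
	return 0;
}

Keeping the result in a temporary, rather than assigning it straight back over the original pointer, avoids losing the old object when the allocation fails, since krealloc() does not free it in that case.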
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	49
1 file changed, 48 insertions(+), 1 deletion(-)
diff --git a/mm/slab.c b/mm/slab.c
index 168bfe9d8ffe..8b71a9c3daa4 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3740,6 +3740,53 @@ EXPORT_SYMBOL(__kmalloc);
 #endif
 
 /**
+ * krealloc - reallocate memory. The contents will remain unchanged.
+ *
+ * @p: object to reallocate memory for.
+ * @new_size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate.
+ *
+ * The contents of the object pointed to are preserved up to the
+ * lesser of the new and old sizes. If @p is %NULL, krealloc()
+ * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
+ * %NULL pointer, the object pointed to is freed.
+ */
+void *krealloc(const void *p, size_t new_size, gfp_t flags)
+{
+	struct kmem_cache *cache, *new_cache;
+	void *ret;
+
+	if (unlikely(!p))
+		return kmalloc_track_caller(new_size, flags);
+
+	if (unlikely(!new_size)) {
+		kfree(p);
+		return NULL;
+	}
+
+	cache = virt_to_cache(p);
+	new_cache = __find_general_cachep(new_size, flags);
+
+	/*
+	 * If new size fits in the current cache, bail out.
+	 */
+	if (likely(cache == new_cache))
+		return (void *)p;
+
+	/*
+	 * We are on the slow-path here so do not use __cache_alloc
+	 * because it bloats kernel text.
+	 */
+	ret = kmalloc_track_caller(new_size, flags);
+	if (ret) {
+		memcpy(ret, p, min(new_size, ksize(p)));
+		kfree(p);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(krealloc);
+
+/**
  * kmem_cache_free - Deallocate an object
  * @cachep: The cache the allocation was from.
  * @objp: The previously allocated object.
@@ -4481,7 +4528,7 @@ const struct seq_operations slabstats_op = {
  * allocated with either kmalloc() or kmem_cache_alloc(). The object
  * must not be freed during the duration of the call.
  */
-unsigned int ksize(const void *objp)
+size_t ksize(const void *objp)
 {
 	if (unlikely(objp == NULL))
 		return 0;
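For illustration only, the fast path in the first hunk means that a resize which maps to the same general kmalloc cache hands back the original pointer with no copy. A minimal sketch, assuming a typical configuration in which 40-byte and 60-byte requests are both served from the 64-byte kmalloc cache (function name is hypothetical):

#include <linux/kernel.h>
#include <linux/slab.h>

static void krealloc_fastpath_sketch(void)
{
	char *p, *q;

	p = kmalloc(40, GFP_KERNEL);
	if (!p)
		return;

	/*
	 * Same general cache as the original allocation: no new object,
	 * no memcpy(), the original pointer is returned unchanged.
	 */
	q = krealloc(p, 60, GFP_KERNEL);
	WARN_ON(q != p);

	kfree(q);
}

Only when the new size crosses into a different size class does krealloc() take the slow path: allocate, copy up to the smaller of the two sizes, and kfree() the old object; on allocation failure it returns NULL and leaves the old object intact.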