path: root/mm/slab.c
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	71
1 file changed, 64 insertions(+), 7 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index a14787799014..b6d9b8cdefa9 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -112,6 +112,7 @@
 #include <linux/rtmutex.h>
 #include <linux/reciprocal_div.h>
 #include <linux/debugobjects.h>
+#include <linux/kmemtrace.h>

 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
@@ -568,6 +569,14 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)

 #endif

+#ifdef CONFIG_KMEMTRACE
+size_t slab_buffer_size(struct kmem_cache *cachep)
+{
+	return cachep->buffer_size;
+}
+EXPORT_SYMBOL(slab_buffer_size);
+#endif
+
 /*
  * Do not go above this order unless 0 objects fit into the slab.
  */
@@ -3613,10 +3622,23 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
  */
 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
-	return __cache_alloc(cachep, flags, __builtin_return_address(0));
+	void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+			     obj_size(cachep), cachep->buffer_size, flags);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc);

+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+{
+	return __cache_alloc(cachep, flags, __builtin_return_address(0));
+}
+EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+#endif
+
 /**
  * kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
  * @cachep: the cache we're checking against
@@ -3661,23 +3683,47 @@ out:
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
-	return __cache_alloc_node(cachep, flags, nodeid,
-			__builtin_return_address(0));
+	void *ret = __cache_alloc_node(cachep, flags, nodeid,
+				       __builtin_return_address(0));
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+				  obj_size(cachep), cachep->buffer_size,
+				  flags, nodeid);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);

+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+				    gfp_t flags,
+				    int nodeid)
+{
+	return __cache_alloc_node(cachep, flags, nodeid,
+				  __builtin_return_address(0));
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+#endif
+
 static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 {
 	struct kmem_cache *cachep;
+	void *ret;

 	cachep = kmem_find_general_cachep(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	return kmem_cache_alloc_node(cachep, flags, node);
+	ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+				  (unsigned long) caller, ret,
+				  size, cachep->buffer_size, flags, node);
+
+	return ret;
 }

-#ifdef CONFIG_DEBUG_SLAB
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	return __do_kmalloc_node(size, flags, node,
@@ -3710,6 +3756,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 					  void *caller)
 {
 	struct kmem_cache *cachep;
+	void *ret;

 	/* If you want to save a few bytes .text space: replace
 	 * __ with kmem_.
@@ -3719,11 +3766,17 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 	cachep = __find_general_cachep(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	return __cache_alloc(cachep, flags, caller);
+	ret = __cache_alloc(cachep, flags, caller);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
+			     (unsigned long) caller, ret,
+			     size, cachep->buffer_size, flags);
+
+	return ret;
 }


-#ifdef CONFIG_DEBUG_SLAB
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	return __do_kmalloc(size, flags, __builtin_return_address(0));
@@ -3762,6 +3815,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 	debug_check_no_obj_freed(objp, obj_size(cachep));
 	__cache_free(cachep, objp);
 	local_irq_restore(flags);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, objp);
 }
 EXPORT_SYMBOL(kmem_cache_free);

@@ -3788,6 +3843,8 @@ void kfree(const void *objp)
 	debug_check_no_obj_freed(objp, obj_size(c));
 	__cache_free(c, (void *)objp);
 	local_irq_restore(flags);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, objp);
 }
 EXPORT_SYMBOL(kfree);

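Note (illustrative, not part of the patch): the _notrace entry points let inline fast paths outside mm/slab.c allocate without emitting a duplicate kmemtrace event, and then record the size the caller actually requested rather than the cache's object size. A minimal sketch of that caller-side pattern, assuming a hypothetical kmalloc_traced() wrapper built only on the API this patch adds:

/*
 * Illustrative sketch only; kmalloc_traced() is hypothetical.
 * Allocate via the untraced entry point, then emit the kmemtrace
 * event ourselves with the caller-requested size, so no second
 * event comes from kmem_cache_alloc() and the requested size is
 * preserved instead of obj_size(cachep).
 */
static __always_inline void *kmalloc_traced(struct kmem_cache *cachep,
					    size_t size, gfp_t flags)
{
	void *ret = kmem_cache_alloc_notrace(cachep, flags);

	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
			     size, slab_buffer_size(cachep), flags);
	return ret;
}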