about summary refs log tree commit diff stats
path: root/mm/slab.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c | 29
1 files changed, 24 insertions, 5 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 6fbd6a1cdeb..67527268b01 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2687,7 +2687,8 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 	return objp;
 }
 
-static inline void *__cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+static __always_inline void *
+__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 {
 	unsigned long save_flags;
 	void *objp;
@@ -2698,7 +2699,7 @@ static inline void *__cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 	objp = ____cache_alloc(cachep, flags);
 	local_irq_restore(save_flags);
 	objp = cache_alloc_debugcheck_after(cachep, flags, objp,
-					    __builtin_return_address(0));
+					    caller);
 	prefetchw(objp);
 	return objp;
 }
@@ -2927,7 +2928,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
  */
 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
-	return __cache_alloc(cachep, flags);
+	return __cache_alloc(cachep, flags, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
@@ -3041,7 +3042,8 @@ EXPORT_SYMBOL(kmalloc_node);
  * platforms. For example, on i386, it means that the memory must come
  * from the first 16MB.
  */
-void *__kmalloc(size_t size, gfp_t flags)
+static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
+					  void *caller)
 {
 	struct kmem_cache *cachep;
 
@@ -3053,10 +3055,27 @@ void *__kmalloc(size_t size, gfp_t flags)
 	cachep = __find_general_cachep(size, flags);
 	if (unlikely(cachep == NULL))
 		return NULL;
-	return __cache_alloc(cachep, flags);
+	return __cache_alloc(cachep, flags, caller);
+}
+
+#ifndef CONFIG_DEBUG_SLAB
+
+void *__kmalloc(size_t size, gfp_t flags)
+{
+	return __do_kmalloc(size, flags, NULL);
 }
 EXPORT_SYMBOL(__kmalloc);
 
+#else
+
+void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
+{
+	return __do_kmalloc(size, flags, caller);
+}
+EXPORT_SYMBOL(__kmalloc_track_caller);
+
+#endif
+
 #ifdef CONFIG_SMP
 /**
  * __alloc_percpu - allocate one copy of the object for every present