 mm/slab.c | 50 ++++++++++++++++++++++++--------------------------
 1 file changed, 24 insertions(+), 26 deletions(-)
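
The change itself is mechanical: every slab path that records its caller for
debugging now carries the return address as an unsigned long instead of a
void *, and the exported entry points pass _RET_IP_ rather than an open-coded
__builtin_return_address(0). The two spellings are equivalent; _RET_IP_ is
defined in include/linux/kernel.h as:

	#define _RET_IP_		(unsigned long)__builtin_return_address(0)

The effect is to move the integer/pointer casts out of the common paths and
into the few DEBUG-only spots that still want a pointer.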
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3084,7 +3084,7 @@ static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
 }
 
 static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
-				   void *caller)
+				   unsigned long caller)
 {
 	struct page *page;
 	unsigned int objnr;
@@ -3104,7 +3104,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
 	}
 	if (cachep->flags & SLAB_STORE_USER)
-		*dbg_userword(cachep, objp) = caller;
+		*dbg_userword(cachep, objp) = (void *)caller;
 
 	objnr = obj_to_index(cachep, slabp, objp);
 
@@ -3117,7 +3117,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 	if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
 		if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
-			store_stackinfo(cachep, objp, (unsigned long)caller);
+			store_stackinfo(cachep, objp, caller);
 			kernel_map_pages(virt_to_page(objp),
 					 cachep->size / PAGE_SIZE, 0);
 		} else {
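
On the free side, cache_free_debugcheck() records the caller in the object's
debug word when the cache was created with SLAB_STORE_USER. dbg_userword()
stores a void *, so the cast back to a pointer now happens at that one spot,
while store_stackinfo() takes the unsigned long directly. The idiom is easy
to reproduce outside the kernel; a minimal user-space sketch (hypothetical
names, builds with GCC or Clang):

	#include <stdio.h>
	#include <stdlib.h>

	/* stand-in for the kernel's _RET_IP_ */
	#define RET_IP ((unsigned long)__builtin_return_address(0))

	/* the inner layer sees the caller only as an integer-typed address */
	static void *alloc_tracked(size_t size, unsigned long caller)
	{
		/* a debug slot would store (void *)caller, as dbg_userword() does */
		printf("alloc of %zu bytes for caller %#lx\n", size, caller);
		return malloc(size);
	}

	/* noinline keeps a real frame, so the return address is the call site */
	static __attribute__((noinline)) void *my_malloc(size_t size)
	{
		return alloc_tracked(size, RET_IP);
	}

	int main(void)
	{
		free(my_malloc(32));
		return 0;
	}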
@@ -3270,7 +3270,7 @@ static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
 
 #if DEBUG
 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
-				gfp_t flags, void *objp, void *caller)
+				gfp_t flags, void *objp, unsigned long caller)
 {
 	if (!objp)
 		return objp;
@@ -3287,7 +3287,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 		poison_obj(cachep, objp, POISON_INUSE);
 	}
 	if (cachep->flags & SLAB_STORE_USER)
-		*dbg_userword(cachep, objp) = caller;
+		*dbg_userword(cachep, objp) = (void *)caller;
 
 	if (cachep->flags & SLAB_RED_ZONE) {
 		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
@@ -3562,7 +3562,7 @@ done:
  */
 static __always_inline void *
 __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
-		   void *caller)
+		   unsigned long caller)
 {
 	unsigned long save_flags;
 	void *ptr;
@@ -3648,7 +3648,7 @@ __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 #endif /* CONFIG_NUMA */
 
 static __always_inline void *
-__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
+__cache_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
 {
 	unsigned long save_flags;
 	void *objp;
@@ -3784,7 +3784,7 @@ free_done:
  * be in this state _before_ it is released. Called with disabled ints.
  */
 static inline void __cache_free(struct kmem_cache *cachep, void *objp,
-				void *caller)
+				unsigned long caller)
 {
 	struct array_cache *ac = cpu_cache_get(cachep);
 
@@ -3824,7 +3824,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
  */
 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
-	void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+	void *ret = __cache_alloc(cachep, flags, _RET_IP_);
 
 	trace_kmem_cache_alloc(_RET_IP_, ret,
 			       cachep->object_size, cachep->size, flags);
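
kmem_cache_alloc() was already computing _RET_IP_ for its tracepoint; after
this hunk the same expression feeds the debug bookkeeping too, so both
consumers agree on the call site. It also matches the tracepoint's own
prototype, which takes the call site as an integer already (abbreviated from
the kmem_alloc event class in include/trace/events/kmem.h):

	DECLARE_EVENT_CLASS(kmem_alloc,

		TP_PROTO(unsigned long call_site,
			 const void *ptr,
			 size_t bytes_req,
			 size_t bytes_alloc,
			 gfp_t gfp_flags),
		...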
@@ -3839,7 +3839,7 @@ kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
 {
 	void *ret;
 
-	ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+	ret = __cache_alloc(cachep, flags, _RET_IP_);
 
 	trace_kmalloc(_RET_IP_, ret,
 		      size, cachep->size, flags);
@@ -3851,8 +3851,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_trace);
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
-	void *ret = __cache_alloc_node(cachep, flags, nodeid,
-				       __builtin_return_address(0));
+	void *ret = __cache_alloc_node(cachep, flags, nodeid, _RET_IP_);
 
 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
 				    cachep->object_size, cachep->size,
@@ -3870,8 +3869,8 @@ void *kmem_cache_alloc_node_trace(size_t size,
 {
 	void *ret;
 
-	ret = __cache_alloc_node(cachep, flags, nodeid,
-				  __builtin_return_address(0));
+	ret = __cache_alloc_node(cachep, flags, nodeid, _RET_IP_);
+
 	trace_kmalloc_node(_RET_IP_, ret,
 			   size, cachep->size,
 			   flags, nodeid);
@@ -3881,7 +3880,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
 #endif
 
 static __always_inline void *
-__do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
+__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
 {
 	struct kmem_cache *cachep;
 
@@ -3894,21 +3893,20 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
-	return __do_kmalloc_node(size, flags, node,
-			__builtin_return_address(0));
+	return __do_kmalloc_node(size, flags, node, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc_node);
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
 		int node, unsigned long caller)
 {
-	return __do_kmalloc_node(size, flags, node, (void *)caller);
+	return __do_kmalloc_node(size, flags, node, caller);
 }
 EXPORT_SYMBOL(__kmalloc_node_track_caller);
 #else
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
-	return __do_kmalloc_node(size, flags, node, NULL);
+	return __do_kmalloc_node(size, flags, node, 0);
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */
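
Two details in this hunk: __kmalloc_node_track_caller() becomes a plain
pass-through, since its caller argument already arrives as an unsigned long,
and the no-debug stub's "no caller" sentinel changes from NULL to 0 to match
the new type. The _track_caller entry points exist so that allocation
wrappers get charged to *their* callers; include/linux/slab.h wires them up
along these lines (schematic, the exact #ifdef nesting depends on the
configuration):

	#define kmalloc_track_caller(size, flags) \
		__kmalloc_track_caller(size, flags, _RET_IP_)

	#define kmalloc_node_track_caller(size, flags, node) \
		__kmalloc_node_track_caller(size, flags, node, _RET_IP_)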
@@ -3921,7 +3919,7 @@ EXPORT_SYMBOL(__kmalloc_node);
  * @caller: function caller for debug tracking of the caller
  */
 static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
-					  void *caller)
+					  unsigned long caller)
 {
 	struct kmem_cache *cachep;
 	void *ret;
@@ -3936,7 +3934,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 		return cachep;
 	ret = __cache_alloc(cachep, flags, caller);
 
-	trace_kmalloc((unsigned long) caller, ret,
+	trace_kmalloc(caller, ret,
 		      size, cachep->size, flags);
 
 	return ret;
@@ -3946,20 +3944,20 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
 void *__kmalloc(size_t size, gfp_t flags)
 {
-	return __do_kmalloc(size, flags, __builtin_return_address(0));
+	return __do_kmalloc(size, flags, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc);
 
 void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
 {
-	return __do_kmalloc(size, flags, (void *)caller);
+	return __do_kmalloc(size, flags, caller);
 }
 EXPORT_SYMBOL(__kmalloc_track_caller);
 
 #else
 void *__kmalloc(size_t size, gfp_t flags)
 {
-	return __do_kmalloc(size, flags, NULL);
+	return __do_kmalloc(size, flags, 0);
 }
 EXPORT_SYMBOL(__kmalloc);
 #endif
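
This is what lets helpers such as kstrdup() show up in slab debugging and
kmalloc tracing under their own callers rather than under mm/util.c;
simplified from mm/util.c:

	char *kstrdup(const char *s, gfp_t gfp)
	{
		size_t len;
		char *buf;

		if (!s)
			return NULL;

		len = strlen(s) + 1;
		buf = kmalloc_track_caller(len, gfp);
		if (buf)
			memcpy(buf, s, len);
		return buf;
	}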
@@ -3980,7 +3978,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 	debug_check_no_locks_freed(objp, cachep->object_size);
 	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
 		debug_check_no_obj_freed(objp, cachep->object_size);
-	__cache_free(cachep, objp, __builtin_return_address(0));
+	__cache_free(cachep, objp, _RET_IP_);
 	local_irq_restore(flags);
 
 	trace_kmem_cache_free(_RET_IP_, objp);
@@ -4011,7 +4009,7 @@ void kfree(const void *objp)
 	debug_check_no_locks_freed(objp, c->object_size);
 
 	debug_check_no_obj_freed(objp, c->object_size);
-	__cache_free(c, (void *)objp, __builtin_return_address(0));
+	__cache_free(c, (void *)objp, _RET_IP_);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(kfree);
