 mm/slub.c | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)
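Summary: this patch renames the internal fast-path helper slab_alloc() to slab_alloc_node() and reintroduces slab_alloc() as a thin wrapper that passes NUMA_NO_NODE. Node-agnostic entry points (kmem_cache_alloc(), kmem_cache_alloc_trace(), __kmalloc(), __kmalloc_track_caller()) no longer spell out NUMA_NO_NODE at every call site, while the node-aware entry points call slab_alloc_node() directly.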
diff --git a/mm/slub.c b/mm/slub.c
index a6d043e13266..f074f756405a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2313,7 +2313,7 @@ new_slab:
  *
  * Otherwise we can simply pick the next object from the lockless free list.
  */
-static __always_inline void *slab_alloc(struct kmem_cache *s,
+static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 		gfp_t gfpflags, int node, unsigned long addr)
 {
 	void **object;
@@ -2383,9 +2383,15 @@ redo:
 	return object;
 }
 
+static __always_inline void *slab_alloc(struct kmem_cache *s,
+		gfp_t gfpflags, unsigned long addr)
+{
+	return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
+}
+
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
 
 	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags);
 
@@ -2396,7 +2402,7 @@ EXPORT_SYMBOL(kmem_cache_alloc);
 #ifdef CONFIG_TRACING
 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
-	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
 	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
 	return ret;
 }
@@ -2414,7 +2420,7 @@ EXPORT_SYMBOL(kmalloc_order_trace);
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
 
 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
 				    s->object_size, s->size, gfpflags, node);
@@ -2428,7 +2434,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 				    gfp_t gfpflags,
 				    int node, size_t size)
 {
-	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
 
 	trace_kmalloc_node(_RET_IP_, ret,
 			   size, s->size, gfpflags, node);
@@ -3366,7 +3372,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_);
+	ret = slab_alloc(s, flags, _RET_IP_);
 
 	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
 
@@ -3409,7 +3415,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc(s, flags, node, _RET_IP_);
+	ret = slab_alloc_node(s, flags, node, _RET_IP_);
 
 	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
 
@@ -4037,7 +4043,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller);
+	ret = slab_alloc(s, gfpflags, caller);
 
 	/* Honor the call site pointer we received. */
 	trace_kmalloc(caller, ret, size, s->size, gfpflags);
@@ -4067,7 +4073,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc(s, gfpflags, node, caller);
+	ret = slab_alloc_node(s, gfpflags, node, caller);
 
 	/* Honor the call site pointer we received. */
 	trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
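
For readers who want the shape of the refactoring without the surrounding slub internals, below is a minimal user-space sketch of the same pattern: a node-aware core function plus a thin wrapper that supplies the "no preference" node. The struct kmem_cache body and the allocation logic here are stand-ins, not the kernel's, and the gfpflags/caller-address parameters are omitted to keep the sketch short; only the calling convention mirrors the patch.

/* Minimal sketch of the wrapper pattern; not kernel code. */
#include <stdio.h>
#include <stdlib.h>

#define NUMA_NO_NODE (-1)

struct kmem_cache {
	size_t object_size;	/* stand-in for the real struct */
};

/* Node-aware core, like slab_alloc_node() after this patch. */
static inline void *slab_alloc_node(struct kmem_cache *s, int node)
{
	/* The real function would consult per-cpu freelists and honor 'node'. */
	(void)node;
	return malloc(s->object_size);
}

/* Wrapper for the common "any node" case, like the new slab_alloc(). */
static inline void *slab_alloc(struct kmem_cache *s)
{
	return slab_alloc_node(s, NUMA_NO_NODE);
}

int main(void)
{
	struct kmem_cache cache = { .object_size = 64 };
	void *a = slab_alloc(&cache);		/* node-agnostic call site */
	void *b = slab_alloc_node(&cache, 0);	/* explicit node request */

	printf("a=%p b=%p\n", a, b);
	free(a);
	free(b);
	return 0;
}

The payoff visible in the hunks above: every node-agnostic call site drops its hard-coded NUMA_NO_NODE argument, and because the wrapper is __always_inline in the real code, the indirection costs nothing on the fast path.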