author	Ezequiel Garcia <elezegarcia@gmail.com>	2012-09-08 16:47:53 -0400
committer	Pekka Enberg <penberg@kernel.org>	2012-09-25 03:14:18 -0400
commit	f3f741019595f1e73564d985f5fe8abcbb98c769 (patch)
tree	6a46da5eb60a8bd2bf6bdaf5804e40367b91048f /mm
parent	ff4fcd01ec86d98d15d2fd96f22f19bb1d341b88 (diff)
mm, slob: Add support for kmalloc_track_caller()
Currently slob falls back to regular kmalloc for this case. With this
patch kmalloc_track_caller() is correctly implemented, thus tracing the
specified caller.

This is important to trace accurately allocations performed by
krealloc, kstrdup, kmemdup, etc.

Signed-off-by: Ezequiel Garcia <elezegarcia@gmail.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
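The pattern at work here is worth seeing outside the kernel: a single always-inline worker takes the call-site address as an explicit parameter, and each public entry point either records its own caller or forwards one handed in by a wrapper. Below is a minimal, self-contained userspace sketch of that pattern; the names do_alloc, my_alloc, my_alloc_track_caller, and my_strdup, and the printf standing in for trace_kmalloc_node(), are illustrative, not kernel code.

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Worker takes the call-site address as an explicit argument, in the
	 * spirit of __do_kmalloc_node(); always_inline keeps the extra hop
	 * out of every allocation path. */
	static inline __attribute__((always_inline))
	void *do_alloc(size_t size, unsigned long caller)
	{
		void *ret = malloc(size);
		/* Stand-in for trace_kmalloc_node(caller, ...). */
		printf("allocated %zu bytes for caller %#lx\n", size, caller);
		return ret;
	}

	/* Plain entry point: records its own caller, like __kmalloc_node(). */
	void *my_alloc(size_t size)
	{
		return do_alloc(size, (unsigned long)__builtin_return_address(0));
	}

	/* Track-caller entry point, like __kmalloc_track_caller(): trusts
	 * the caller address handed in by a wrapper. */
	void *my_alloc_track_caller(size_t size, unsigned long caller)
	{
		return do_alloc(size, caller);
	}

	/* A wrapper such as kstrdup() forwards ITS caller, so the trace
	 * points at the code that called my_strdup(), not at my_strdup(). */
	char *my_strdup(const char *s)
	{
		size_t len = strlen(s) + 1;
		char *p = my_alloc_track_caller(len,
				(unsigned long)__builtin_return_address(0));
		if (p)
			memcpy(p, s, len);
		return p;
	}

	int main(void)
	{
		char *copy = my_strdup("hello"); /* trace blames main() */
		free(copy);
		return 0;
	}

Before this patch, slob had no such track-caller entry point, so allocations routed through wrappers like kstrdup() could not be attributed to the wrapper's caller.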
Diffstat (limited to 'mm')
 mm/slob.c | 27 ++++++++++++++++++++++++---
 1 file changed, 24 insertions(+), 3 deletions(-)
diff --git a/mm/slob.c b/mm/slob.c
index 191e1713a6d9..dd47d16d57b6 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -425,7 +425,8 @@ out:
  * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
  */
 
-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+static __always_inline void *
+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 {
 	unsigned int *m;
 	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
@@ -446,7 +447,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 		*m = size;
 		ret = (void *)m + align;
 
-		trace_kmalloc_node(_RET_IP_, ret,
+		trace_kmalloc_node(caller, ret,
 				   size, size + align, gfp, node);
 	} else {
 		unsigned int order = get_order(size);
@@ -460,15 +461,35 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 			page->private = size;
 		}
 
-		trace_kmalloc_node(_RET_IP_, ret,
+		trace_kmalloc_node(caller, ret,
 				   size, PAGE_SIZE << order, gfp, node);
 	}
 
 	kmemleak_alloc(ret, size, 1, gfp);
 	return ret;
 }
+
+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+{
+	return __do_kmalloc_node(size, gfp, node, _RET_IP_);
+}
 EXPORT_SYMBOL(__kmalloc_node);
 
+#ifdef CONFIG_TRACING
+void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
+{
+	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller);
+}
+
+#ifdef CONFIG_NUMA
+void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
+					int node, unsigned long caller)
+{
+	return __do_kmalloc_node(size, gfpflags, node, caller);
+}
+#endif
+#endif
+
 void kfree(const void *block)
 {
 	struct page *sp;
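For reference, the consumer side looks roughly like the following; this is a sketch based on the <linux/slab.h> and mm/util.c conventions of that era, and exact guards and prototypes differ across kernel versions.

	/* In <linux/slab.h>: the macro captures _RET_IP_ at the call site
	 * and forwards it; with slob, the expansion is only meaningful once
	 * __kmalloc_track_caller() exists, which is what this patch adds. */
	#define kmalloc_track_caller(size, flags) \
		__kmalloc_track_caller(size, flags, _RET_IP_)

	/* In the spirit of kstrdup() in mm/util.c: the allocation gets
	 * attributed to kstrdup()'s caller rather than to kstrdup(). */
	char *kstrdup(const char *s, gfp_t gfp)
	{
		size_t len;
		char *buf;

		if (!s)
			return NULL;

		len = strlen(s) + 1;
		buf = kmalloc_track_caller(len, gfp);
		if (buf)
			memcpy(buf, s, len);
		return buf;
	}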