author    Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>    2008-08-19 13:43:26 -0400
committer Pekka Enberg <penberg@cs.helsinki.fi>    2008-12-29 08:34:07 -0500
commit    5b882be4e00e53a44f47ad7eb997cac2938848bf (patch)
tree      a3f21e64af805bf4c13ac98dda50b0b06678d039 /mm
parent    3eae2cb24a96509e0a38cc48dc1538a2826f4e33 (diff)
kmemtrace: SLUB hooks.
This adds hooks for the SLUB allocator, to allow tracing with kmemtrace.

Signed-off-by: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Diffstat (limited to 'mm')
-rw-r--r--    mm/slub.c    65
1 file changed, 59 insertions(+), 6 deletions(-)
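For readers following the hooks added below: the kmemtrace_mark_alloc(), kmemtrace_mark_alloc_node() and kmemtrace_mark_free() calls come from the new <linux/kmemtrace.h> include. That header is not part of this mm-only diff, so the following is only a minimal sketch of the expected interface, with signatures reconstructed as an assumption; when CONFIG_KMEMTRACE is disabled the markers are expected to compile to no-ops, which is why the hooks can sit unconditionally in the hot paths.

/* Sketch of <linux/kmemtrace.h> (assumed, not part of this diff). */
enum kmemtrace_type_id {
	KMEMTRACE_TYPE_KMALLOC,		/* kmalloc()/kfree() events */
	KMEMTRACE_TYPE_CACHE,		/* kmem_cache_alloc()/kmem_cache_free() events */
	KMEMTRACE_TYPE_PAGES,		/* page allocator events */
};

#ifdef CONFIG_KMEMTRACE
extern void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
				      unsigned long call_site,
				      const void *ptr,
				      size_t bytes_req,
				      size_t bytes_alloc,
				      gfp_t gfp_flags,
				      int node);
extern void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
				unsigned long call_site,
				const void *ptr);
#else
static inline void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
					     unsigned long call_site,
					     const void *ptr,
					     size_t bytes_req,
					     size_t bytes_alloc,
					     gfp_t gfp_flags,
					     int node)
{
}
static inline void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
				       unsigned long call_site,
				       const void *ptr)
{
}
#endif

/* Non-NUMA marker forwards to the node-aware one with node == -1. */
static inline void kmemtrace_mark_alloc(enum kmemtrace_type_id type_id,
					unsigned long call_site,
					const void *ptr,
					size_t bytes_req,
					size_t bytes_alloc,
					gfp_t gfp_flags)
{
	kmemtrace_mark_alloc_node(type_id, call_site, ptr,
				  bytes_req, bytes_alloc, gfp_flags, -1);
}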
diff --git a/mm/slub.c b/mm/slub.c
index 06da86654875..4c48a0146afd 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -24,6 +24,7 @@
 #include <linux/kallsyms.h>
 #include <linux/memory.h>
 #include <linux/math64.h>
+#include <linux/kmemtrace.h>
 
 /*
  * Lock order:
@@ -1613,18 +1614,46 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return slab_alloc(s, gfpflags, -1, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+			     s->objsize, s->size, gfpflags);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+{
+	return slab_alloc(s, gfpflags, -1, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+#endif
+
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	return slab_alloc(s, gfpflags, node, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+				  s->objsize, s->size, gfpflags, node);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+				    gfp_t gfpflags,
+				    int node)
+{
+	return slab_alloc(s, gfpflags, node, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+#endif
+
 /*
  * Slow patch handling. This may still be called frequently since objects
  * have a longer lifetime than the cpu slabs in most processing loads.
@@ -1732,6 +1761,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 	page = virt_to_head_page(x);
 
 	slab_free(s, page, x, _RET_IP_);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -2650,6 +2681,7 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s;
+	void *ret;
 
 	if (unlikely(size > PAGE_SIZE))
 		return kmalloc_large(size, flags);
@@ -2659,7 +2691,12 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, -1, _RET_IP_);
+	ret = slab_alloc(s, flags, -1, _RET_IP_);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+			     size, s->size, flags);
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2678,16 +2715,30 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *s;
+	void *ret;
 
-	if (unlikely(size > PAGE_SIZE))
-		return kmalloc_large_node(size, flags, node);
+	if (unlikely(size > PAGE_SIZE)) {
+		ret = kmalloc_large_node(size, flags, node);
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _RET_IP_, ret,
+					  size, PAGE_SIZE << get_order(size),
+					  flags, node);
+
+		return ret;
+	}
 
 	s = get_slab(size, flags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, node, _RET_IP_);
+	ret = slab_alloc(s, flags, node, _RET_IP_);
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+				  size, s->size, flags, node);
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2745,6 +2796,8 @@ void kfree(const void *x)
 		return;
 	}
 	slab_free(page->slab, page, object, _RET_IP_);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kfree);
 
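The CONFIG_KMEMTRACE-only kmem_cache_alloc_notrace()/kmem_cache_alloc_node_notrace() entry points added above exist so that inlined fast paths (such as the kmalloc() wrappers in include/linux/slub_def.h) can allocate without emitting a duplicate cache-level event and instead report the size the caller actually requested. That header-side change is outside this mm-only diff; the sketch below is only an illustration of the intended usage, with the helper names treated as assumptions.

/* Assumed slub_def.h counterpart: fall back when kmemtrace is off. */
#ifdef CONFIG_KMEMTRACE
extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
#else
static __always_inline void *
kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
{
	return kmem_cache_alloc(s, gfpflags);
}
#endif

/*
 * Hypothetical inlined kmalloc() fast path: allocate via the _notrace
 * variant, then record a KMEMTRACE_TYPE_KMALLOC event with the requested
 * size rather than the cache's rounded-up object size.
 */
static __always_inline void *kmalloc_fastpath_sketch(size_t size, gfp_t flags)
{
	struct kmem_cache *s = kmalloc_slab(size);	/* assumed lookup helper */
	void *ret;

	if (!s)
		return ZERO_SIZE_PTR;

	ret = kmem_cache_alloc_notrace(s, flags);

	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
			     size, s->size, flags);

	return ret;
}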