author    Ingo Molnar <mingo@elte.hu>  2009-04-01 15:54:19 -0400
committer Ingo Molnar <mingo@elte.hu>  2009-04-01 18:49:02 -0400
commit    8302294f43250dc337108c51882a6007f2b1e2e0 (patch)
tree      85acd4440799c46a372df9cad170fa0c21e59096 /mm/slub.c
parent    4fe70410d9a219dabb47328effccae7e7f2a6e26 (diff)
parent    2e572895bf3203e881356a4039ab0fa428ed2639 (diff)
Merge branch 'tracing/core-v2' into tracing-for-linus

Conflicts:
	include/linux/slub_def.h
	lib/Kconfig.debug
	mm/slob.c
	mm/slub.c
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  83
1 file changed, 75 insertions(+), 8 deletions(-)
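This merge wires the kmemtrace hooks into SLUB's allocation and free paths: every successful allocation reports its call site, the returned pointer, the requested and actually granted sizes, and the GFP flags; frees report only the call site and pointer. For orientation, the hook signatures below are inferred from the call sites in this diff; the authoritative declarations live in trace/kmemtrace.h:

/* Inferred from the call sites in this patch; see trace/kmemtrace.h
 * for the real declarations. */
void kmemtrace_mark_alloc(enum kmemtrace_type_id type_id,
			  unsigned long call_site, const void *ptr,
			  size_t bytes_req, size_t bytes_alloc,
			  gfp_t gfp_flags);
void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
			       unsigned long call_site, const void *ptr,
			       size_t bytes_req, size_t bytes_alloc,
			       gfp_t gfp_flags, int node);
void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
			 unsigned long call_site, const void *ptr);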
diff --git a/mm/slub.c b/mm/slub.c
index c4ea9158c9fb..7aaa121d0ea9 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -16,6 +16,7 @@
 #include <linux/slab.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <trace/kmemtrace.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
 #include <linux/mempolicy.h>
@@ -1618,18 +1619,46 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return slab_alloc(s, gfpflags, -1, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+			     s->objsize, s->size, gfpflags);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+{
+	return slab_alloc(s, gfpflags, -1, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+#endif
+
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	return slab_alloc(s, gfpflags, node, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+				  s->objsize, s->size, gfpflags, node);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+				    gfp_t gfpflags,
+				    int node)
+{
+	return slab_alloc(s, gfpflags, node, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+#endif
+
 /*
  * Slow path handling. This may still be called frequently since objects
  * have a longer lifetime than the cpu slabs in most processing loads.
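The _notrace variants exist so that callers which already emit their own kmemtrace event, notably the inlined kmalloc fast paths in include/linux/slub_def.h (one of the conflicted files above), can reach slab_alloc() without producing a duplicate record. A minimal sketch of such a caller; the wrapper name and the kmalloc_slab() lookup helper are illustrative, not taken from this patch:

/* Sketch only: an inline wrapper that traces on behalf of slub.c.
 * kmalloc_slab() stands in for whatever size-to-cache lookup the
 * header actually uses. */
static __always_inline void *kmalloc_traced(size_t size, gfp_t flags)
{
	struct kmem_cache *s = kmalloc_slab(size);
	void *ret = kmem_cache_alloc_notrace(s, flags);

	/* The wrapper knows the requested size, so it can log a more
	 * precise bytes_req than kmem_cache_alloc() could. */
	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
			     size, s->size, flags);
	return ret;
}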
@@ -1737,6 +1766,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 	page = virt_to_head_page(x);
 
 	slab_free(s, page, x, _RET_IP_);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -2659,6 +2690,7 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s;
+	void *ret;
 
 	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, flags);
@@ -2668,7 +2700,12 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, -1, _RET_IP_);
+	ret = slab_alloc(s, flags, -1, _RET_IP_);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+			     size, s->size, flags);
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc);
 
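Note the two size arguments: bytes_req is what the caller asked for (size), while bytes_alloc is what the slab actually handed out (s->size, including rounding and metadata). A worked example, assuming the default power-of-two kmalloc caches:

/* Worked example (assuming default power-of-two kmalloc caches):
 *
 *	kmalloc(100, GFP_KERNEL)
 *	  -> get_slab(100, ...) selects the 128-byte cache
 *	  -> event: bytes_req = 100, bytes_alloc = s->size (>= 128)
 *
 * A trace consumer can subtract the two to measure internal
 * fragmentation per call site. */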
@@ -2687,16 +2724,30 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *s;
+	void *ret;
 
-	if (unlikely(size > SLUB_MAX_SIZE))
-		return kmalloc_large_node(size, flags, node);
+	if (unlikely(size > SLUB_MAX_SIZE)) {
+		ret = kmalloc_large_node(size, flags, node);
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _RET_IP_, ret,
+					  size, PAGE_SIZE << get_order(size),
+					  flags, node);
+
+		return ret;
+	}
 
 	s = get_slab(size, flags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, node, _RET_IP_);
+	ret = slab_alloc(s, flags, node, _RET_IP_);
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+				  size, s->size, flags, node);
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
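In the large-allocation branch there is no kmem_cache to consult, so bytes_alloc is reconstructed from the page order: PAGE_SIZE << get_order(size) is the size of the compound page actually allocated. For example, with 4 KiB pages:

/* Worked example with 4 KiB pages:
 *
 *	__kmalloc_node(20000, ...)	size > SLUB_MAX_SIZE
 *	get_order(20000) == 3		5 pages needed -> rounds to 8
 *	PAGE_SIZE << 3 == 32768		bytes_alloc reported
 *
 * so the event records bytes_req = 20000, bytes_alloc = 32768. */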
@@ -2755,6 +2806,8 @@ void kfree(const void *x)
 		return;
 	}
 	slab_free(page->slab, page, object, _RET_IP_);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -3224,6 +3277,7 @@ static struct notifier_block __cpuinitdata slab_notifier = {
 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
 	struct kmem_cache *s;
+	void *ret;
 
 	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, gfpflags);
@@ -3233,13 +3287,20 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, gfpflags, -1, caller);
+	ret = slab_alloc(s, gfpflags, -1, caller);
+
+	/* Honor the call site pointer we received. */
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, caller, ret, size,
+			     s->size, gfpflags);
+
+	return ret;
 }
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 				  int node, unsigned long caller)
 {
 	struct kmem_cache *s;
+	void *ret;
 
 	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large_node(size, gfpflags, node);
@@ -3249,7 +3310,13 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, gfpflags, node, caller);
+	ret = slab_alloc(s, gfpflags, node, caller);
+
+	/* Honor the call site pointer we received. */
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, caller, ret,
+				  size, s->size, gfpflags, node);
+
+	return ret;
 }
 
 #ifdef CONFIG_SLUB_DEBUG
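The _track_caller variants pass the supplied caller instead of _RET_IP_, so the kmemtrace event is attributed to the real call site rather than to an intermediate wrapper. A schematic of the pattern, loosely modeled on wrappers like kstrdup() but not copied from mm/util.c:

/* Schematic: a wrapper forwarding its own return address so the
 * kmemtrace event names the wrapper's caller, not the wrapper. */
char *kstrdup_sketch(const char *s, gfp_t gfp)
{
	size_t len = strlen(s) + 1;
	char *buf = __kmalloc_track_caller(len, gfp, _RET_IP_);

	if (buf)
		memcpy(buf, s, len);
	return buf;
}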