Diffstat (limited to 'mm/slub.c')
 mm/slub.c | 83
 1 file changed, 75 insertions(+), 8 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 0280eee6cf37..3525e7b21d19 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -16,6 +16,7 @@
 #include <linux/slab.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <trace/kmemtrace.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
 #include <linux/mempolicy.h>
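The new <trace/kmemtrace.h> include provides the kmemtrace_mark_alloc(), kmemtrace_mark_alloc_node() and kmemtrace_mark_free() markers used throughout the rest of this patch. As a rough sketch only (not part of this patch), one would expect the header to degrade to empty inlines when CONFIG_KMEMTRACE is disabled so the hooks below compile away; the argument lists here are inferred from the call sites added in this patch, and the exact parameter types (including the enum name) are assumptions:

    /* Assumed no-op fallbacks for !CONFIG_KMEMTRACE; signatures inferred
     * from the call sites below, parameter types are guesses.
     * kmemtrace_mark_alloc_node() would take an additional int node. */
    static inline void kmemtrace_mark_alloc(enum kmemtrace_type_id type_id,
                                            unsigned long call_site,
                                            const void *ptr,
                                            size_t bytes_req,
                                            size_t bytes_alloc,
                                            gfp_t gfp_flags)
    {
    }

    static inline void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
                                           unsigned long call_site,
                                           const void *ptr)
    {
    }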
@@ -1623,18 +1624,46 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-        return slab_alloc(s, gfpflags, -1, _RET_IP_);
+        void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
+
+        kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+                             s->objsize, s->size, gfpflags);
+
+        return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+{
+        return slab_alloc(s, gfpflags, -1, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+#endif
+
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-        return slab_alloc(s, gfpflags, node, _RET_IP_);
+        void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+
+        kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+                                  s->objsize, s->size, gfpflags, node);
+
+        return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+                                    gfp_t gfpflags,
+                                    int node)
+{
+        return slab_alloc(s, gfpflags, node, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+#endif
+
 /*
  * Slow patch handling. This may still be called frequently since objects
  * have a longer lifetime than the cpu slabs in most processing loads.
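The allocation hot paths keep their single slab_alloc() call; the only change is that the return value is captured so the event can report both the size the cache was created for (s->objsize) and the size actually consumed per object including metadata and alignment (s->size). The new *_notrace variants, built only under CONFIG_KMEMTRACE, return objects without emitting a KMEMTRACE_TYPE_CACHE event; presumably they are meant for inlined kmalloc() fast paths that pick a cache themselves and want to log a KMEMTRACE_TYPE_KMALLOC event with the caller-requested size instead. That wiring is not part of this hunk; a hypothetical caller might look like:

    /* Hypothetical sketch -- kmalloc_small() is not a real kernel function,
     * it only illustrates why kmem_cache_alloc_notrace() exists. */
    static __always_inline void *kmalloc_small(struct kmem_cache *s,
                                               size_t size, gfp_t flags)
    {
            void *ret = kmem_cache_alloc_notrace(s, flags);

            /* Log the requested size, not s->objsize, and avoid the duplicate
             * CACHE-typed event kmem_cache_alloc() would have emitted. */
            kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
                                 size, s->size, flags);
            return ret;
    }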
@@ -1742,6 +1771,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
         page = virt_to_head_page(x);
 
         slab_free(s, page, x, _RET_IP_);
+
+        kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -2657,6 +2688,7 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 void *__kmalloc(size_t size, gfp_t flags)
 {
         struct kmem_cache *s;
+        void *ret;
 
         if (unlikely(size > PAGE_SIZE))
                 return kmalloc_large(size, flags);
@@ -2666,7 +2698,12 @@ void *__kmalloc(size_t size, gfp_t flags)
         if (unlikely(ZERO_OR_NULL_PTR(s)))
                 return s;
 
-        return slab_alloc(s, flags, -1, _RET_IP_);
+        ret = slab_alloc(s, flags, -1, _RET_IP_);
+
+        kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+                             size, s->size, flags);
+
+        return ret;
 }
 EXPORT_SYMBOL(__kmalloc);
 
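For kmalloc-style allocations the event distinguishes bytes requested (the size argument) from bytes allocated (s->size of the kmalloc cache that served the request), which lets a tracer quantify per-allocation slack. A made-up example, assuming slab debugging is off and the request lands in the 128-byte kmalloc cache:

    __kmalloc(100, GFP_KERNEL);
    /* Resulting event, roughly:
     *   type        = KMEMTRACE_TYPE_KMALLOC
     *   call_site   = _RET_IP_ (the __kmalloc caller)
     *   bytes_req   = 100                 (the size argument)
     *   bytes_alloc = s->size, ~128 here  (cache object size incl. padding)
     *   gfp_flags   = GFP_KERNEL
     * i.e. roughly 28 bytes of slack for this allocation. */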
@@ -2685,16 +2722,30 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
         struct kmem_cache *s;
+        void *ret;
 
-        if (unlikely(size > PAGE_SIZE))
-                return kmalloc_large_node(size, flags, node);
+        if (unlikely(size > PAGE_SIZE)) {
+                ret = kmalloc_large_node(size, flags, node);
+
+                kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+                                          _RET_IP_, ret,
+                                          size, PAGE_SIZE << get_order(size),
+                                          flags, node);
+
+                return ret;
+        }
 
         s = get_slab(size, flags);
 
         if (unlikely(ZERO_OR_NULL_PTR(s)))
                 return s;
 
-        return slab_alloc(s, flags, node, _RET_IP_);
+        ret = slab_alloc(s, flags, node, _RET_IP_);
+
+        kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+                                  size, s->size, flags, node);
+
+        return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
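Requests above PAGE_SIZE bypass the kmalloc caches, so there is no s->size to report; the event instead uses PAGE_SIZE << get_order(size), the amount the page allocator actually hands back. The branch gains braces and stores into ret so the event can fire before returning (note it fires even if kmalloc_large_node() returns NULL; ret is logged as-is).

    /* Worked example, assuming 4 KiB pages (made-up numbers):
     *   size                         = 9000   -> > PAGE_SIZE, large path
     *   get_order(size)              = 2      (8192 < 9000 <= 16384)
     *   PAGE_SIZE << get_order(size) = 16384  -> reported as bytes_alloc */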
@@ -2753,6 +2804,8 @@ void kfree(const void *x)
                 return;
         }
         slab_free(page->slab, page, object, _RET_IP_);
+
+        kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kfree);
 
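The free-side events carry only the type, the call site and the pointer; a tracer is expected to match them against earlier allocation events by address. Note the hook sits after the early return visible in the context above, so frees taking that path are not marked by this hunk. Alloc/free pairing, schematically:

    void *p = __kmalloc(100, GFP_KERNEL); /* KMEMTRACE_TYPE_KMALLOC alloc event, ptr == p */
    kfree(p);                             /* KMEMTRACE_TYPE_KMALLOC free event,  ptr == p */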
@@ -3222,6 +3275,7 @@ static struct notifier_block __cpuinitdata slab_notifier = {
 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
         struct kmem_cache *s;
+        void *ret;
 
         if (unlikely(size > PAGE_SIZE))
                 return kmalloc_large(size, gfpflags);
@@ -3231,13 +3285,20 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
         if (unlikely(ZERO_OR_NULL_PTR(s)))
                 return s;
 
-        return slab_alloc(s, gfpflags, -1, caller);
+        ret = slab_alloc(s, gfpflags, -1, caller);
+
+        /* Honor the call site pointer we received. */
+        kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, caller, ret, size,
+                             s->size, gfpflags);
+
+        return ret;
 }
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
                                   int node, unsigned long caller)
 {
         struct kmem_cache *s;
+        void *ret;
 
         if (unlikely(size > PAGE_SIZE))
                 return kmalloc_large_node(size, gfpflags, node);
@@ -3247,7 +3308,13 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
         if (unlikely(ZERO_OR_NULL_PTR(s)))
                 return s;
 
-        return slab_alloc(s, gfpflags, node, caller);
+        ret = slab_alloc(s, gfpflags, node, caller);
+
+        /* Honor the call site pointer we received. */
+        kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, caller, ret,
+                                  size, s->size, gfpflags, node);
+
+        return ret;
 }
 
 #ifdef CONFIG_SLUB_DEBUG
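The _track_caller variants are the one place where the events use the explicit caller argument rather than _RET_IP_: these functions sit behind wrapper helpers, and logging _RET_IP_ here would attribute every such allocation to the wrapper instead of its user. A hypothetical wrapper (not from this patch) shows the intent:

    /* Hypothetical helper; my_strdup() is only illustrative. Inside it,
     * _RET_IP_ is the address of my_strdup()'s caller, so the trace event
     * points at the real call site rather than at the helper itself. */
    static char *my_strdup(const char *s, gfp_t gfp)
    {
            size_t len = strlen(s) + 1;
            char *buf = __kmalloc_track_caller(len, gfp, _RET_IP_);

            if (buf)
                    memcpy(buf, s, len);
            return buf;
    }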