commit    36555751c6751a5bdfd6d7bdf0648343bb1ef0de
tree      47ed7ab2c8971e4c5d2f5a902860b1cf9facbc42
parent    aa46a7e0228c0477708ce44a0c5621902b3c157c
author    Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>  2008-08-10 13:14:05 -0400
committer Pekka Enberg <penberg@cs.helsinki.fi>  2008-12-29 08:34:04 -0500
kmemtrace: SLAB hooks.
This adds hooks for the SLAB allocator to allow tracing with kmemtrace.
We also convert some inline functions to __always_inline to make sure
_RET_IP_, which expands to __builtin_return_address(0), always works
as expected.
Signed-off-by: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
-rw-r--r--  include/linux/slab_def.h  68
-rw-r--r--  mm/slab.c                 71
2 files changed, 123 insertions, 16 deletions
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 39c3a5eb8ebe..7555ce99f6d2 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -14,6 +14,7 @@
 #include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
 #include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
 #include <linux/compiler.h>
+#include <linux/kmemtrace.h>
 
 /* Size description struct for general caches. */
 struct cache_sizes {
@@ -28,8 +29,26 @@ extern struct cache_sizes malloc_sizes[];
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
-static inline void *kmalloc(size_t size, gfp_t flags)
+#ifdef CONFIG_KMEMTRACE
+extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
+extern size_t slab_buffer_size(struct kmem_cache *cachep);
+#else
+static __always_inline void *
+kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
 {
+	return kmem_cache_alloc(cachep, flags);
+}
+static inline size_t slab_buffer_size(struct kmem_cache *cachep)
+{
+	return 0;
+}
+#endif
+
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
+{
+	struct kmem_cache *cachep;
+	void *ret;
+
 	if (__builtin_constant_p(size)) {
 		int i = 0;
 
@@ -50,10 +69,17 @@ static inline void *kmalloc(size_t size, gfp_t flags)
 found:
 #ifdef CONFIG_ZONE_DMA
 	if (flags & GFP_DMA)
-		return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep,
-						flags);
+		cachep = malloc_sizes[i].cs_dmacachep;
+	else
 #endif
-	return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags);
+		cachep = malloc_sizes[i].cs_cachep;
+
+	ret = kmem_cache_alloc_notrace(cachep, flags);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
+			     size, slab_buffer_size(cachep), flags);
+
+	return ret;
 	}
 	return __kmalloc(size, flags);
 }
@@ -62,8 +88,25 @@ found:
 extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+#ifdef CONFIG_KMEMTRACE
+extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+					   gfp_t flags,
+					   int nodeid);
+#else
+static __always_inline void *
+kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+			      gfp_t flags,
+			      int nodeid)
+{
+	return kmem_cache_alloc_node(cachep, flags, nodeid);
+}
+#endif
+
+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
+	struct kmem_cache *cachep;
+	void *ret;
+
 	if (__builtin_constant_p(size)) {
 		int i = 0;
 
@@ -84,11 +127,18 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 found:
 #ifdef CONFIG_ZONE_DMA
 	if (flags & GFP_DMA)
-		return kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep,
-						flags, node);
+		cachep = malloc_sizes[i].cs_dmacachep;
+	else
 #endif
-	return kmem_cache_alloc_node(malloc_sizes[i].cs_cachep,
-						flags, node);
+		cachep = malloc_sizes[i].cs_cachep;
+
+	ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_,
+				  ret, size, slab_buffer_size(cachep),
+				  flags, node);
+
+	return ret;
 	}
 	return __kmalloc_node(size, flags, node);
 }
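The header changes above set up a pattern that the mm/slab.c changes below complete: the inline kmalloc() emits the trace event itself, because only the inline site knows the requested size, so it must allocate through a _notrace variant. If it called the traced kmem_cache_alloc() instead, the same allocation would be logged twice, once as KMEMTRACE_TYPE_KMALLOC and once as KMEMTRACE_TYPE_CACHE. A condensed userspace sketch of this split (simplified, hypothetical names, not code from the patch):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for kmemtrace_mark_alloc(): one record per event. */
static void trace_alloc(const char *type, void *ptr, size_t req)
{
	printf("%s: ptr=%p bytes_req=%zu\n", type, ptr, req);
}

/* _notrace variant: performs the allocation, emits no event. */
static void *cache_alloc_notrace(size_t size)
{
	return malloc(size);
}

/* Traced entry point: emits exactly one "cache" event. */
static void *cache_alloc(size_t size)
{
	void *ret = cache_alloc_notrace(size);

	trace_alloc("cache", ret, size);
	return ret;
}

/*
 * kmalloc()-style inline wrapper: it knows the requested size, so it
 * emits its own "kmalloc" event and allocates via the _notrace
 * variant. Going through cache_alloc() instead would record the same
 * allocation twice, under two different type IDs.
 */
static inline void *my_kmalloc(size_t size)
{
	void *ret = cache_alloc_notrace(size);

	trace_alloc("kmalloc", ret, size);
	return ret;
}

int main(void)
{
	void *p = my_kmalloc(64);	/* exactly one "kmalloc" event */
	void *q = cache_alloc(32);	/* exactly one "cache" event */

	free(q);
	free(p);
	return 0;
}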
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -112,6 +112,7 @@
 #include	<linux/rtmutex.h>
 #include	<linux/reciprocal_div.h>
 #include	<linux/debugobjects.h>
+#include	<linux/kmemtrace.h>
 
 #include	<asm/cacheflush.h>
 #include	<asm/tlbflush.h>
@@ -568,6 +569,14 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 
 #endif
 
+#ifdef CONFIG_KMEMTRACE
+size_t slab_buffer_size(struct kmem_cache *cachep)
+{
+	return cachep->buffer_size;
+}
+EXPORT_SYMBOL(slab_buffer_size);
+#endif
+
 /*
  * Do not go above this order unless 0 objects fit into the slab.
  */
@@ -3613,10 +3622,23 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
  */
 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
-	return __cache_alloc(cachep, flags, __builtin_return_address(0));
+	void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+			     obj_size(cachep), cachep->buffer_size, flags);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+{
+	return __cache_alloc(cachep, flags, __builtin_return_address(0));
+}
+EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+#endif
+
 /**
  * kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
  * @cachep: the cache we're checking against
@@ -3661,23 +3683,47 @@ out:
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
-	return __cache_alloc_node(cachep, flags, nodeid,
-			__builtin_return_address(0));
+	void *ret = __cache_alloc_node(cachep, flags, nodeid,
+				       __builtin_return_address(0));
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+				  obj_size(cachep), cachep->buffer_size,
+				  flags, nodeid);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+				    gfp_t flags,
+				    int nodeid)
+{
+	return __cache_alloc_node(cachep, flags, nodeid,
+				  __builtin_return_address(0));
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+#endif
+
 static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 {
 	struct kmem_cache *cachep;
+	void *ret;
 
 	cachep = kmem_find_general_cachep(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	return kmem_cache_alloc_node(cachep, flags, node);
+	ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+				  (unsigned long) caller, ret,
+				  size, cachep->buffer_size, flags, node);
+
+	return ret;
 }
 
-#ifdef CONFIG_DEBUG_SLAB
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	return __do_kmalloc_node(size, flags, node,
@@ -3710,6 +3756,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 					  void *caller)
 {
 	struct kmem_cache *cachep;
+	void *ret;
 
 	/* If you want to save a few bytes .text space: replace
 	 * __ with kmem_.
@@ -3719,11 +3766,17 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 	cachep = __find_general_cachep(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	return __cache_alloc(cachep, flags, caller);
+	ret = __cache_alloc(cachep, flags, caller);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
+			     (unsigned long) caller, ret,
+			     size, cachep->buffer_size, flags);
+
+	return ret;
 }
 
 
-#ifdef CONFIG_DEBUG_SLAB
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	return __do_kmalloc(size, flags, __builtin_return_address(0));
@@ -3762,6 +3815,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 	debug_check_no_obj_freed(objp, obj_size(cachep));
 	__cache_free(cachep, objp);
 	local_irq_restore(flags);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, objp);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -3788,6 +3843,8 @@ void kfree(const void *objp)
 	debug_check_no_obj_freed(objp, obj_size(c));
 	__cache_free(c, (void *)objp);
 	local_irq_restore(flags);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, objp);
 }
 EXPORT_SYMBOL(kfree);
 