author     Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>  2008-08-19 13:43:26 -0400
committer  Pekka Enberg <penberg@cs.helsinki.fi>                    2008-12-29 08:34:07 -0500
commit     5b882be4e00e53a44f47ad7eb997cac2938848bf
tree       a3f21e64af805bf4c13ac98dda50b0b06678d039
parent     3eae2cb24a96509e0a38cc48dc1538a2826f4e33
kmemtrace: SLUB hooks.
This adds hooks for the SLUB allocator, to allow tracing with kmemtrace.
Signed-off-by: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
-rw-r--r--  include/linux/slub_def.h |  53
-rw-r--r--  mm/slub.c                |  65
2 files changed, 109 insertions, 9 deletions
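
Before the diff itself, it is worth spelling out the pattern the header changes introduce. kmem_cache_alloc() now records a KMEMTRACE_TYPE_CACHE event itself, so the inline kmalloc() fast path allocates through the new kmem_cache_alloc_notrace() and records its own KMEMTRACE_TYPE_KMALLOC event carrying the size the caller actually asked for; going through the traced entry point would log the same object twice. With CONFIG_KMEMTRACE disabled, the _notrace name is just an inline wrapper around kmem_cache_alloc(). The following is a minimal stand-alone sketch of that traced/untraced split, not kernel code: the names model_cache, model_cache_alloc(), trace_event() and the cache size are invented for illustration.

#include <stdio.h>
#include <stdlib.h>

#define CONFIG_KMEMTRACE 1      /* flip to 0 to model the hooks compiled out */

struct model_cache { size_t objsize; }; /* stand-in for struct kmem_cache */

static void trace_event(const char *type, void *ptr, size_t req, size_t alloc)
{
        printf("%s ptr=%p requested=%zu allocated=%zu\n", type, ptr, req, alloc);
}

/* Traced entry point, analogous to kmem_cache_alloc() after this patch. */
static void *model_cache_alloc(struct model_cache *s)
{
        void *ret = malloc(s->objsize);

        trace_event("CACHE", ret, s->objsize, s->objsize);
        return ret;
}

#if CONFIG_KMEMTRACE
/* Untraced variant, analogous to kmem_cache_alloc_notrace(). */
static void *model_cache_alloc_notrace(struct model_cache *s)
{
        return malloc(s->objsize);
}
#else
/* With tracing compiled out, the _notrace name simply falls back. */
static void *model_cache_alloc_notrace(struct model_cache *s)
{
        return model_cache_alloc(s);
}
#endif

/* Analogous to the inline kmalloc() fast path: allocate untraced, then emit
 * one KMALLOC-style event carrying the size the caller asked for. */
static void *model_kmalloc(size_t size, struct model_cache *s)
{
        void *ret = model_cache_alloc_notrace(s);

        trace_event("KMALLOC", ret, size, s->objsize);
        return ret;
}

int main(void)
{
        struct model_cache cache128 = { .objsize = 128 };
        void *p = model_kmalloc(100, &cache128);  /* one KMALLOC event */
        void *q = model_cache_alloc(&cache128);   /* one CACHE event */

        free(p);
        free(q);
        return 0;
}

The same split repeats below for the NUMA entry points, kmem_cache_alloc_node() and kmem_cache_alloc_node_notrace().
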
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 2f5c16b1aacd..dc28432b5b9a 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -10,6 +10,7 @@
 #include <linux/gfp.h>
 #include <linux/workqueue.h>
 #include <linux/kobject.h>
+#include <linux/kmemtrace.h>
 
 enum stat_item {
         ALLOC_FASTPATH,         /* Allocation from cpu slab */
@@ -204,13 +205,31 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
+#ifdef CONFIG_KMEMTRACE
+extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
+#else
+static __always_inline void *
+kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+{
+        return kmem_cache_alloc(s, gfpflags);
+}
+#endif
+
 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
 {
-        return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size));
+        unsigned int order = get_order(size);
+        void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
+
+        kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
+                             size, PAGE_SIZE << order, flags);
+
+        return ret;
 }
 
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
+        void *ret;
+
         if (__builtin_constant_p(size)) {
                 if (size > PAGE_SIZE)
                         return kmalloc_large(size, flags);
@@ -221,7 +240,13 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
                         if (!s)
                                 return ZERO_SIZE_PTR;
 
-                        return kmem_cache_alloc(s, flags);
+                        ret = kmem_cache_alloc_notrace(s, flags);
+
+                        kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
+                                             _THIS_IP_, ret,
+                                             size, s->size, flags);
+
+                        return ret;
                 }
         }
         return __kmalloc(size, flags);
@@ -231,8 +256,24 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 void *__kmalloc_node(size_t size, gfp_t flags, int node);
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
+#ifdef CONFIG_KMEMTRACE
+extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+                                           gfp_t gfpflags,
+                                           int node);
+#else
+static __always_inline void *
+kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+                              gfp_t gfpflags,
+                              int node)
+{
+        return kmem_cache_alloc_node(s, gfpflags, node);
+}
+#endif
+
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
+        void *ret;
+
         if (__builtin_constant_p(size) &&
                 size <= PAGE_SIZE && !(flags & SLUB_DMA)) {
                         struct kmem_cache *s = kmalloc_slab(size);
@@ -240,7 +281,13 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
                 if (!s)
                         return ZERO_SIZE_PTR;
 
-                return kmem_cache_alloc_node(s, flags, node);
+                ret = kmem_cache_alloc_node_notrace(s, flags, node);
+
+                kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+                                          _THIS_IP_, ret,
+                                          size, s->size, flags, node);
+
+                return ret;
         }
         return __kmalloc_node(size, flags, node);
 }
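
That closes the include/linux/slub_def.h side. Note that every allocation hook passes two sizes: the bytes requested by the caller (size) and the bytes actually set aside for the object (s->size for slab-backed allocations, PAGE_SIZE << order for kmalloc_large()). The difference between the two is per-object internal fragmentation, which is exactly the kind of thing a kmemtrace consumer can aggregate. A toy illustration follows; the 128-byte figure is an assumption about the usual power-of-two kmalloc caches, not something this patch defines.

#include <stdio.h>

int main(void)
{
        /* Illustrative only: assume a kmalloc(100) request is served from a
         * 128-byte cache, so the trace event would carry requested=100 and
         * allocated=128 (s->size). */
        unsigned long requested = 100;
        unsigned long allocated = 128;

        printf("internal fragmentation: %lu bytes per object\n",
               allocated - requested);
        return 0;
}
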
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -24,6 +24,7 @@
 #include <linux/kallsyms.h>
 #include <linux/memory.h>
 #include <linux/math64.h>
+#include <linux/kmemtrace.h>
 
 /*
  * Lock order:
@@ -1613,18 +1614,46 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-        return slab_alloc(s, gfpflags, -1, _RET_IP_);
+        void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
+
+        kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+                             s->objsize, s->size, gfpflags);
+
+        return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+{
+        return slab_alloc(s, gfpflags, -1, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+#endif
+
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-        return slab_alloc(s, gfpflags, node, _RET_IP_);
+        void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+
+        kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+                                  s->objsize, s->size, gfpflags, node);
+
+        return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+                                    gfp_t gfpflags,
+                                    int node)
+{
+        return slab_alloc(s, gfpflags, node, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+#endif
+
 /*
  * Slow patch handling. This may still be called frequently since objects
  * have a longer lifetime than the cpu slabs in most processing loads.
@@ -1732,6 +1761,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
         page = virt_to_head_page(x);
 
         slab_free(s, page, x, _RET_IP_);
+
+        kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -2650,6 +2681,7 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 void *__kmalloc(size_t size, gfp_t flags)
 {
         struct kmem_cache *s;
+        void *ret;
 
         if (unlikely(size > PAGE_SIZE))
                 return kmalloc_large(size, flags);
@@ -2659,7 +2691,12 @@ void *__kmalloc(size_t size, gfp_t flags)
         if (unlikely(ZERO_OR_NULL_PTR(s)))
                 return s;
 
-        return slab_alloc(s, flags, -1, _RET_IP_);
+        ret = slab_alloc(s, flags, -1, _RET_IP_);
+
+        kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+                             size, s->size, flags);
+
+        return ret;
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2678,16 +2715,30 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
         struct kmem_cache *s;
+        void *ret;
 
-        if (unlikely(size > PAGE_SIZE))
-                return kmalloc_large_node(size, flags, node);
+        if (unlikely(size > PAGE_SIZE)) {
+                ret = kmalloc_large_node(size, flags, node);
+
+                kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+                                          _RET_IP_, ret,
+                                          size, PAGE_SIZE << get_order(size),
+                                          flags, node);
+
+                return ret;
+        }
 
         s = get_slab(size, flags);
 
         if (unlikely(ZERO_OR_NULL_PTR(s)))
                 return s;
 
-        return slab_alloc(s, flags, node, _RET_IP_);
+        ret = slab_alloc(s, flags, node, _RET_IP_);
+
+        kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+                                  size, s->size, flags, node);
+
+        return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2745,6 +2796,8 @@ void kfree(const void *x)
                 return;
         }
         slab_free(page->slab, page, object, _RET_IP_);
+
+        kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kfree);
 
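
On the free side, the hooks added after slab_free() in kmem_cache_free() and kfree() record only the event type, the call site and the pointer. Since every allocation event above also carries the pointer, a consumer can pair the two streams by address to reconstruct object lifetimes or flag allocations that are never freed. The sketch below is a hypothetical userspace consumer of such a paired stream; the record_alloc()/record_free() names and the event layout are illustrative assumptions, not the kmemtrace ABI.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical, minimal alloc/free pairing by pointer. */
#define MAX_LIVE 1024

static uintptr_t live_ptr[MAX_LIVE];
static size_t    live_req[MAX_LIVE];
static int       nlive;

static void record_alloc(uintptr_t ptr, size_t requested)
{
        if (nlive < MAX_LIVE) {
                live_ptr[nlive] = ptr;
                live_req[nlive] = requested;
                nlive++;
        }
}

static void record_free(uintptr_t ptr)
{
        for (int i = 0; i < nlive; i++) {
                if (live_ptr[i] == ptr) {
                        live_ptr[i] = live_ptr[nlive - 1];
                        live_req[i] = live_req[nlive - 1];
                        nlive--;
                        return;
                }
        }
        printf("free of untracked pointer %#lx\n", (unsigned long)ptr);
}

int main(void)
{
        /* Feed a made-up event stream: two allocs, one free. */
        record_alloc(0x1000, 100);
        record_alloc(0x2000, 64);
        record_free(0x1000);

        for (int i = 0; i < nlive; i++)
                printf("still live: %#lx (%zu bytes requested)\n",
                       (unsigned long)live_ptr[i], live_req[i]);
        return 0;
}
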