diff options
Diffstat (limited to 'mm/slub.c')
-rw-r--r-- | mm/slub.c | 83 |
1 files changed, 75 insertions, 8 deletions
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
17 | #include <linux/proc_fs.h> | 17 | #include <linux/proc_fs.h> |
18 | #include <linux/seq_file.h> | 18 | #include <linux/seq_file.h> |
19 | #include <trace/kmemtrace.h> | ||
19 | #include <linux/cpu.h> | 20 | #include <linux/cpu.h> |
20 | #include <linux/cpuset.h> | 21 | #include <linux/cpuset.h> |
21 | #include <linux/mempolicy.h> | 22 | #include <linux/mempolicy.h> |
@@ -1623,18 +1624,46 @@ static __always_inline void *slab_alloc(struct kmem_cache *s, | |||
1623 | 1624 | ||
1624 | void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) | 1625 | void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) |
1625 | { | 1626 | { |
1626 | return slab_alloc(s, gfpflags, -1, _RET_IP_); | 1627 | void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_); |
1628 | |||
1629 | kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret, | ||
1630 | s->objsize, s->size, gfpflags); | ||
1631 | |||
1632 | return ret; | ||
1627 | } | 1633 | } |
1628 | EXPORT_SYMBOL(kmem_cache_alloc); | 1634 | EXPORT_SYMBOL(kmem_cache_alloc); |
1629 | 1635 | ||
1636 | #ifdef CONFIG_KMEMTRACE | ||
1637 | void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags) | ||
1638 | { | ||
1639 | return slab_alloc(s, gfpflags, -1, _RET_IP_); | ||
1640 | } | ||
1641 | EXPORT_SYMBOL(kmem_cache_alloc_notrace); | ||
1642 | #endif | ||
1643 | |||
1630 | #ifdef CONFIG_NUMA | 1644 | #ifdef CONFIG_NUMA |
1631 | void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) | 1645 | void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) |
1632 | { | 1646 | { |
1633 | return slab_alloc(s, gfpflags, node, _RET_IP_); | 1647 | void *ret = slab_alloc(s, gfpflags, node, _RET_IP_); |
1648 | |||
1649 | kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret, | ||
1650 | s->objsize, s->size, gfpflags, node); | ||
1651 | |||
1652 | return ret; | ||
1634 | } | 1653 | } |
1635 | EXPORT_SYMBOL(kmem_cache_alloc_node); | 1654 | EXPORT_SYMBOL(kmem_cache_alloc_node); |
1636 | #endif | 1655 | #endif |
1637 | 1656 | ||
1657 | #ifdef CONFIG_KMEMTRACE | ||
1658 | void *kmem_cache_alloc_node_notrace(struct kmem_cache *s, | ||
1659 | gfp_t gfpflags, | ||
1660 | int node) | ||
1661 | { | ||
1662 | return slab_alloc(s, gfpflags, node, _RET_IP_); | ||
1663 | } | ||
1664 | EXPORT_SYMBOL(kmem_cache_alloc_node_notrace); | ||
1665 | #endif | ||
1666 | |||
1638 | /* | 1667 | /* |
1639 | * Slow patch handling. This may still be called frequently since objects | 1668 | * Slow patch handling. This may still be called frequently since objects |
1640 | * have a longer lifetime than the cpu slabs in most processing loads. | 1669 | * have a longer lifetime than the cpu slabs in most processing loads. |
@@ -1742,6 +1771,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x) | |||
1742 | page = virt_to_head_page(x); | 1771 | page = virt_to_head_page(x); |
1743 | 1772 | ||
1744 | slab_free(s, page, x, _RET_IP_); | 1773 | slab_free(s, page, x, _RET_IP_); |
1774 | |||
1775 | kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, x); | ||
1745 | } | 1776 | } |
1746 | EXPORT_SYMBOL(kmem_cache_free); | 1777 | EXPORT_SYMBOL(kmem_cache_free); |
1747 | 1778 | ||
@@ -2657,6 +2688,7 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags) | |||
2657 | void *__kmalloc(size_t size, gfp_t flags) | 2688 | void *__kmalloc(size_t size, gfp_t flags) |
2658 | { | 2689 | { |
2659 | struct kmem_cache *s; | 2690 | struct kmem_cache *s; |
2691 | void *ret; | ||
2660 | 2692 | ||
2661 | if (unlikely(size > PAGE_SIZE)) | 2693 | if (unlikely(size > PAGE_SIZE)) |
2662 | return kmalloc_large(size, flags); | 2694 | return kmalloc_large(size, flags); |
@@ -2666,7 +2698,12 @@ void *__kmalloc(size_t size, gfp_t flags) | |||
2666 | if (unlikely(ZERO_OR_NULL_PTR(s))) | 2698 | if (unlikely(ZERO_OR_NULL_PTR(s))) |
2667 | return s; | 2699 | return s; |
2668 | 2700 | ||
2669 | return slab_alloc(s, flags, -1, _RET_IP_); | 2701 | ret = slab_alloc(s, flags, -1, _RET_IP_); |
2702 | |||
2703 | kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret, | ||
2704 | size, s->size, flags); | ||
2705 | |||
2706 | return ret; | ||
2670 | } | 2707 | } |
2671 | EXPORT_SYMBOL(__kmalloc); | 2708 | EXPORT_SYMBOL(__kmalloc); |
2672 | 2709 | ||
@@ -2685,16 +2722,30 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node) | |||
2685 | void *__kmalloc_node(size_t size, gfp_t flags, int node) | 2722 | void *__kmalloc_node(size_t size, gfp_t flags, int node) |
2686 | { | 2723 | { |
2687 | struct kmem_cache *s; | 2724 | struct kmem_cache *s; |
2725 | void *ret; | ||
2688 | 2726 | ||
2689 | if (unlikely(size > PAGE_SIZE)) | 2727 | if (unlikely(size > PAGE_SIZE)) { |
2690 | return kmalloc_large_node(size, flags, node); | 2728 | ret = kmalloc_large_node(size, flags, node); |
2729 | |||
2730 | kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, | ||
2731 | _RET_IP_, ret, | ||
2732 | size, PAGE_SIZE << get_order(size), | ||
2733 | flags, node); | ||
2734 | |||
2735 | return ret; | ||
2736 | } | ||
2691 | 2737 | ||
2692 | s = get_slab(size, flags); | 2738 | s = get_slab(size, flags); |
2693 | 2739 | ||
2694 | if (unlikely(ZERO_OR_NULL_PTR(s))) | 2740 | if (unlikely(ZERO_OR_NULL_PTR(s))) |
2695 | return s; | 2741 | return s; |
2696 | 2742 | ||
2697 | return slab_alloc(s, flags, node, _RET_IP_); | 2743 | ret = slab_alloc(s, flags, node, _RET_IP_); |
2744 | |||
2745 | kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret, | ||
2746 | size, s->size, flags, node); | ||
2747 | |||
2748 | return ret; | ||
2698 | } | 2749 | } |
2699 | EXPORT_SYMBOL(__kmalloc_node); | 2750 | EXPORT_SYMBOL(__kmalloc_node); |
2700 | #endif | 2751 | #endif |
@@ -2752,6 +2803,8 @@ void kfree(const void *x) | |||
2752 | return; | 2803 | return; |
2753 | } | 2804 | } |
2754 | slab_free(page->slab, page, object, _RET_IP_); | 2805 | slab_free(page->slab, page, object, _RET_IP_); |
2806 | |||
2807 | kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, x); | ||
2755 | } | 2808 | } |
2756 | EXPORT_SYMBOL(kfree); | 2809 | EXPORT_SYMBOL(kfree); |
2757 | 2810 | ||
@@ -3221,6 +3274,7 @@ static struct notifier_block __cpuinitdata slab_notifier = { | |||
3221 | void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) | 3274 | void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) |
3222 | { | 3275 | { |
3223 | struct kmem_cache *s; | 3276 | struct kmem_cache *s; |
3277 | void *ret; | ||
3224 | 3278 | ||
3225 | if (unlikely(size > PAGE_SIZE)) | 3279 | if (unlikely(size > PAGE_SIZE)) |
3226 | return kmalloc_large(size, gfpflags); | 3280 | return kmalloc_large(size, gfpflags); |
@@ -3230,13 +3284,20 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) | |||
3230 | if (unlikely(ZERO_OR_NULL_PTR(s))) | 3284 | if (unlikely(ZERO_OR_NULL_PTR(s))) |
3231 | return s; | 3285 | return s; |
3232 | 3286 | ||
3233 | return slab_alloc(s, gfpflags, -1, caller); | 3287 | ret = slab_alloc(s, gfpflags, -1, caller); |
3288 | |||
3289 | /* Honor the call site pointer we received. */ | ||
3290 | kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, caller, ret, size, | ||
3291 | s->size, gfpflags); | ||
3292 | |||
3293 | return ret; | ||
3234 | } | 3294 | } |
3235 | 3295 | ||
3236 | void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, | 3296 | void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, |
3237 | int node, unsigned long caller) | 3297 | int node, unsigned long caller) |
3238 | { | 3298 | { |
3239 | struct kmem_cache *s; | 3299 | struct kmem_cache *s; |
3300 | void *ret; | ||
3240 | 3301 | ||
3241 | if (unlikely(size > PAGE_SIZE)) | 3302 | if (unlikely(size > PAGE_SIZE)) |
3242 | return kmalloc_large_node(size, gfpflags, node); | 3303 | return kmalloc_large_node(size, gfpflags, node); |
@@ -3246,7 +3307,13 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, | |||
3246 | if (unlikely(ZERO_OR_NULL_PTR(s))) | 3307 | if (unlikely(ZERO_OR_NULL_PTR(s))) |
3247 | return s; | 3308 | return s; |
3248 | 3309 | ||
3249 | return slab_alloc(s, gfpflags, node, caller); | 3310 | ret = slab_alloc(s, gfpflags, node, caller); |
3311 | |||
3312 | /* Honor the call site pointer we received. */ | ||
3313 | kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, caller, ret, | ||
3314 | size, s->size, gfpflags, node); | ||
3315 | |||
3316 | return ret; | ||
3250 | } | 3317 | } |
3251 | 3318 | ||
3252 | #ifdef CONFIG_SLUB_DEBUG | 3319 | #ifdef CONFIG_SLUB_DEBUG |