Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	83
1 file changed, 75 insertions, 8 deletions
@@ -16,6 +16,7 @@
 #include <linux/slab.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <trace/kmemtrace.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
 #include <linux/mempolicy.h>
@@ -1618,18 +1619,46 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return slab_alloc(s, gfpflags, -1, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+			     s->objsize, s->size, gfpflags);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+{
+	return slab_alloc(s, gfpflags, -1, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+#endif
+
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	return slab_alloc(s, gfpflags, node, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+				  s->objsize, s->size, gfpflags, node);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+				    gfp_t gfpflags,
+				    int node)
+{
+	return slab_alloc(s, gfpflags, node, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+#endif
+
 /*
  * Slow path handling. This may still be called frequently since objects
  * have a longer lifetime than the cpu slabs in most processing loads.
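The #ifdef CONFIG_KMEMTRACE blocks above add untraced twins of the two allocators. A plausible use, sketched below, is a header-side inline fast path that wants to log its own KMALLOC-type event carrying the requested size, rather than the CACHE-type event that kmem_cache_alloc() now emits; the companion header change is not part of this hunk, and kmalloc_fastpath is a made-up name.

static __always_inline void *kmalloc_fastpath(struct kmem_cache *s,
					      size_t size, gfp_t flags)
{
#ifdef CONFIG_KMEMTRACE
	/* Allocate without the CACHE-type trace from kmem_cache_alloc()... */
	void *ret = kmem_cache_alloc_notrace(s, flags);

	/* ...and log a KMALLOC-type event with the size actually requested. */
	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
			     size, s->size, flags);
	return ret;
#else
	return kmem_cache_alloc(s, flags);
#endif
}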
@@ -1737,6 +1766,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 	page = virt_to_head_page(x);
 
 	slab_free(s, page, x, _RET_IP_);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -2659,6 +2690,7 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s;
+	void *ret;
 
 	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, flags);
@@ -2668,7 +2700,12 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, -1, _RET_IP_);
+	ret = slab_alloc(s, flags, -1, _RET_IP_);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+			     size, s->size, flags);
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc);
 
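Each alloc hook records both the requested size and the cache's rounded-up object size (s->size). One thing a trace consumer can do with that pair, shown in the hedged sketch below, is estimate per-allocation internal fragmentation; the struct layout is illustrative, not the kmemtrace ABI.

struct alloc_event {			/* illustrative fields only */
	unsigned long	call_site;	/* _RET_IP_ recorded by the hook */
	size_t		bytes_req;	/* 'size' as requested by the caller */
	size_t		bytes_alloc;	/* s->size: what the slab hands out */
};

/* Slack inside the object: bytes the caller got but did not ask for. */
static inline size_t alloc_event_waste(const struct alloc_event *e)
{
	return e->bytes_alloc - e->bytes_req;
}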
@@ -2687,16 +2724,30 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *s;
+	void *ret;
 
-	if (unlikely(size > SLUB_MAX_SIZE))
-		return kmalloc_large_node(size, flags, node);
+	if (unlikely(size > SLUB_MAX_SIZE)) {
+		ret = kmalloc_large_node(size, flags, node);
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _RET_IP_, ret,
+					  size, PAGE_SIZE << get_order(size),
+					  flags, node);
+
+		return ret;
+	}
 
 	s = get_slab(size, flags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, node, _RET_IP_);
+	ret = slab_alloc(s, flags, node, _RET_IP_);
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+				  size, s->size, flags, node);
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
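On the oversized path there is no kmem_cache to report, so the hook logs the page-allocator footprint instead. Worked example with 4 KiB pages: a 5000-byte request needs get_order(5000) == 1, so PAGE_SIZE << 1 == 8192 bytes are recorded even though only 5000 were requested. A one-line helper restating that computation (assumes the usual linux/mm.h context):

/* Effective size logged for a large kmalloc: whole pages, rounded up
 * to a power-of-two order. */
static inline size_t kmalloc_large_footprint(size_t size)
{
	return PAGE_SIZE << get_order(size);
}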
@@ -2755,6 +2806,8 @@ void kfree(const void *x)
 		return;
 	}
 	slab_free(page->slab, page, object, _RET_IP_);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kfree);
 
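Both free hooks pass the object pointer, which is all a consumer needs to pair alloc and free events. A deliberately naive userspace sketch (hypothetical post-processing code, not kmemtrace's own tooling): track live pointers; whatever survives to the end of the trace is a leak suspect, and a free with no match is a stray.

#include <stdlib.h>

struct live { const void *ptr; struct live *next; };
static struct live *live_head;

static void on_alloc_event(const void *ptr)
{
	struct live *n = malloc(sizeof(*n));

	if (!n)
		return;
	n->ptr = ptr;
	n->next = live_head;
	live_head = n;
}

/* Returns 1 if the free matched a live allocation, 0 for a stray free. */
static int on_free_event(const void *ptr)
{
	struct live **pp;

	for (pp = &live_head; *pp; pp = &(*pp)->next) {
		if ((*pp)->ptr == ptr) {
			struct live *dead = *pp;

			*pp = dead->next;
			free(dead);
			return 1;
		}
	}
	return 0;
}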
@@ -3224,6 +3277,7 @@ static struct notifier_block __cpuinitdata slab_notifier = {
 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
 	struct kmem_cache *s;
+	void *ret;
 
 	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, gfpflags);
@@ -3233,13 +3287,20 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, gfpflags, -1, caller);
+	ret = slab_alloc(s, gfpflags, -1, caller);
+
+	/* Honor the call site pointer we received. */
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, caller, ret, size,
+			     s->size, gfpflags);
+
+	return ret;
 }
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 				  int node, unsigned long caller)
 {
 	struct kmem_cache *s;
+	void *ret;
 
 	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large_node(size, gfpflags, node);
@@ -3249,7 +3310,13 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, gfpflags, node, caller);
+	ret = slab_alloc(s, gfpflags, node, caller);
+
+	/* Honor the call site pointer we received. */
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, caller, ret,
+				  size, s->size, gfpflags, node);
+
+	return ret;
 }
 
 #ifdef CONFIG_SLUB_DEBUG
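The track_caller variants log 'caller' rather than _RET_IP_ so that attribution lands on the user of a wrapper, not on the wrapper itself. A hedged sketch of the pattern follows; kstrdup_like is an illustrative name, though the kernel's kstrdup reaches __kmalloc_track_caller the same way via the kmalloc_track_caller macro.

char *kstrdup_like(const char *s, gfp_t gfp)
{
	size_t len = strlen(s) + 1;
	/* _RET_IP_ here is kstrdup_like's caller, so the trace event
	 * points at the real call site rather than at this wrapper. */
	char *buf = __kmalloc_track_caller(len, gfp, _RET_IP_);

	if (buf)
		memcpy(buf, s, len);
	return buf;
}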
