author    | Linus Torvalds <torvalds@linux-foundation.org> | 2011-01-10 11:38:01 -0500
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-01-10 11:38:01 -0500
commit    | a1e8fad5900fa94adb500c6e0dfd60a307f7a3c9 (patch)
tree      | c619277e587a99c90e76a1e7b63746af4d959d72 /mm
parent    | e3166331a3288dd7184548896a1c7ab682f0dbe8 (diff)
parent    | a45b0616e7ee9db4c1b2b9a4a79a974325fa9bf3 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
slub: Fix a crash during slabinfo -v
tracing/slab: Move kmalloc tracepoint out of inline code
slub: Fix slub_lock down/up imbalance
slub: Fix build breakage in Documentation/vm
slub tracing: move trace calls out of always inlined functions to reduce kernel code size
slub: move slabinfo.c to tools/slub/slabinfo.c
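
The common thread of the tracing commits above is that the trace_kmalloc()/trace_kmalloc_node() calls are moved out of always-inlined header wrappers and into out-of-line *_trace() helpers in mm/slab.c and mm/slub.c, so the tracepoint code is emitted once per helper instead of at every inlined kmalloc() call site. Below is a minimal, self-contained sketch of that pattern; it is illustrative user-space C, not kernel code, and every name in it (fake_cache, fake_trace_kmalloc, fake_cache_alloc_trace, fake_kmalloc) is a hypothetical stand-in rather than the real slab/slub API.

/*
 * Minimal user-space sketch of the "move trace calls out of inlined
 * functions" pattern: the tracepoint fires in one out-of-line *_trace()
 * helper, and the inline wrapper only forwards to it, so no trace code
 * is duplicated at each inlined call site.
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_cache { size_t object_size; };

/* Stand-in for trace_kmalloc(): record requested vs. allocated size. */
static void fake_trace_kmalloc(const void *ret, size_t req, size_t alloc)
{
	printf("kmalloc: ptr=%p req=%zu alloc=%zu\n", (void *)ret, req, alloc);
}

/* Out-of-line helper, analogous to kmem_cache_alloc_trace(): it allocates
 * and fires the tracepoint in exactly one place. */
void *fake_cache_alloc_trace(size_t size, struct fake_cache *c)
{
	void *ret = malloc(c->object_size);

	fake_trace_kmalloc(ret, size, c->object_size);
	return ret;
}

/* Header-style inline wrapper, analogous to kmalloc() in the slab headers:
 * after the change it only calls the out-of-line helper. */
static inline void *fake_kmalloc(size_t size, struct fake_cache *c)
{
	return fake_cache_alloc_trace(size, c);
}

int main(void)
{
	struct fake_cache cache = { .object_size = 64 };
	void *p = fake_kmalloc(40, &cache);

	free(p);
	return 0;
}
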
Diffstat (limited to 'mm')
-rw-r--r-- | mm/slab.c | 38
-rw-r--r-- | mm/slub.c | 30
2 files changed, 46 insertions, 22 deletions
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3653,11 +3653,18 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+void *
+kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
 {
-	return __cache_alloc(cachep, flags, __builtin_return_address(0));
+	void *ret;
+
+	ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+
+	trace_kmalloc(_RET_IP_, ret,
+		      size, slab_buffer_size(cachep), flags);
+	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_trace);
 #endif
 
 #ifdef CONFIG_NUMA
@@ -3675,31 +3682,32 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
-				    gfp_t flags,
-				    int nodeid)
+void *kmem_cache_alloc_node_trace(size_t size,
+				  struct kmem_cache *cachep,
+				  gfp_t flags,
+				  int nodeid)
 {
-	return __cache_alloc_node(cachep, flags, nodeid,
+	void *ret;
+
+	ret = __cache_alloc_node(cachep, flags, nodeid,
 				  __builtin_return_address(0));
+	trace_kmalloc_node(_RET_IP_, ret,
+			   size, slab_buffer_size(cachep),
+			   flags, nodeid);
+	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
 #endif
 
 static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 {
 	struct kmem_cache *cachep;
-	void *ret;
 
 	cachep = kmem_find_general_cachep(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
-
-	trace_kmalloc_node((unsigned long) caller, ret,
-			   size, cachep->buffer_size, flags, node);
-
-	return ret;
+	return kmem_cache_alloc_node_trace(size, cachep, flags, node);
 }
 
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -28,6 +28,8 @@
 #include <linux/math64.h>
 #include <linux/fault-inject.h>
 
+#include <trace/events/kmem.h>
+
 /*
  * Lock order:
  *   1. slab_lock(page)
@@ -1774,11 +1776,21 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
+{
+	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
+	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
+	return ret;
+}
+EXPORT_SYMBOL(kmem_cache_alloc_trace);
+
+void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
 {
-	return slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
+	void *ret = kmalloc_order(size, flags, order);
+	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
+	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+EXPORT_SYMBOL(kmalloc_order_trace);
 #endif
 
 #ifdef CONFIG_NUMA
@@ -1794,13 +1806,17 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 				    gfp_t gfpflags,
-				    int node)
+				    int node, size_t size)
 {
-	return slab_alloc(s, gfpflags, node, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+
+	trace_kmalloc_node(_RET_IP_, ret,
+			   size, s->size, gfpflags, node);
+	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
 #endif
 #endif
 