author		Ingo Molnar <mingo@elte.hu>	2008-12-29 09:16:24 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-12-29 09:16:24 -0500
commit		2ff9f9d9629bf9530fe2ab8d803d612761ffc059 (patch)
tree		b22e3fddffbc0f58b1e1974f4819896d58b7bdaf /mm/slub.c
parent		0f01f07fad4ee11d98fe6faa442afbeb0328a378 (diff)
parent		a4900437f3d76761a1646cd90254ccb01714a9ed (diff)
Merge branch 'topic/kmemtrace' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6 into tracing/kmemtrace
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	123
1 file changed, 95 insertions(+), 28 deletions(-)
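Two changes run through the whole patch: caller addresses are tracked as unsigned long obtained via _RET_IP_ (which the kernel defines as (unsigned long)__builtin_return_address(0)) instead of raw void * values, and each public alloc/free entry point gains a kmemtrace event. A minimal userspace sketch of the caller-tracking half, with MY_RET_IP and traced_alloc() as hypothetical stand-ins for _RET_IP_ and slab_alloc():

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the kernel's _RET_IP_: the caller's address as an integer. */
#define MY_RET_IP ((unsigned long)__builtin_return_address(0))

struct track {
	unsigned long addr;	/* call site stored as an integer, as in this patch */
};

static struct track last_alloc;

/* Inner allocator: records which call site asked for memory. */
static void *traced_alloc(size_t size, unsigned long caller)
{
	last_alloc.addr = caller;
	return malloc(size);
}

/* Exported entry point: capture the call site exactly once, here. */
void *my_alloc(size_t size)
{
	return traced_alloc(size, MY_RET_IP);
}

int main(void)
{
	void *p = my_alloc(32);

	printf("allocated %p from call site %#lx\n", p, last_alloc.addr);
	free(p);
	return 0;
}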
diff --git a/mm/slub.c b/mm/slub.c
index a2cd47d89e0a..4cd7bfd2ab2c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -24,6 +24,7 @@
 #include <linux/kallsyms.h>
 #include <linux/memory.h>
 #include <linux/math64.h>
+#include <linux/kmemtrace.h>
 
 /*
  * Lock order:
@@ -178,7 +179,7 @@ static LIST_HEAD(slab_caches);
  * Tracking user of a slab.
  */
 struct track {
-	void *addr;		/* Called from address */
+	unsigned long addr;	/* Called from address */
 	int cpu;		/* Was running on cpu */
 	int pid;		/* Pid context */
 	unsigned long when;	/* When did the operation occur */
@@ -367,7 +368,7 @@ static struct track *get_track(struct kmem_cache *s, void *object,
 }
 
 static void set_track(struct kmem_cache *s, void *object,
-			enum track_item alloc, void *addr)
+			enum track_item alloc, unsigned long addr)
 {
 	struct track *p;
 
@@ -391,8 +392,8 @@ static void init_tracking(struct kmem_cache *s, void *object)
 	if (!(s->flags & SLAB_STORE_USER))
 		return;
 
-	set_track(s, object, TRACK_FREE, NULL);
-	set_track(s, object, TRACK_ALLOC, NULL);
+	set_track(s, object, TRACK_FREE, 0UL);
+	set_track(s, object, TRACK_ALLOC, 0UL);
 }
 
 static void print_track(const char *s, struct track *t)
@@ -401,7 +402,7 @@ static void print_track(const char *s, struct track *t)
 		return;
 
 	printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
-		s, t->addr, jiffies - t->when, t->cpu, t->pid);
+		s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
 }
 
 static void print_tracking(struct kmem_cache *s, void *object)
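As the print_track() hunk above shows, an integer call-site address only needs a cast back to void * at print time for the kernel's %pS symbolic specifier. A small standard-C sketch of the same pattern (plain %p standing in for the kernel-only %pS):

#include <stdio.h>

struct track {
	unsigned long addr;	/* integer call-site address */
	unsigned long when;	/* timestamp; jiffies in the kernel */
};

static void print_track(const char *s, const struct track *t, unsigned long now)
{
	if (!t->addr)
		return;
	/* Cast back to a pointer only at print time, as the hunk above does. */
	printf("INFO: %s in %p age=%lu\n", s, (void *)t->addr, now - t->when);
}

int main(void)
{
	struct track t = { .addr = (unsigned long)&main, .when = 100 };

	print_track("Allocated", &t, 160);
	return 0;
}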
@@ -866,7 +867,7 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
 }
 
 static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
-					void *object, void *addr)
+					void *object, unsigned long addr)
 {
 	if (!check_slab(s, page))
 		goto bad;
@@ -906,7 +907,7 @@ bad:
 }
 
 static int free_debug_processing(struct kmem_cache *s, struct page *page,
-					void *object, void *addr)
+					void *object, unsigned long addr)
 {
 	if (!check_slab(s, page))
 		goto fail;
@@ -1029,10 +1030,10 @@ static inline void setup_object_debug(struct kmem_cache *s,
 			struct page *page, void *object) {}
 
 static inline int alloc_debug_processing(struct kmem_cache *s,
-	struct page *page, void *object, void *addr) { return 0; }
+	struct page *page, void *object, unsigned long addr) { return 0; }
 
 static inline int free_debug_processing(struct kmem_cache *s,
-	struct page *page, void *object, void *addr) { return 0; }
+	struct page *page, void *object, unsigned long addr) { return 0; }
 
 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
 			{ return 1; }
@@ -1499,8 +1500,8 @@ static inline int node_match(struct kmem_cache_cpu *c, int node)
  * we need to allocate a new slab. This is the slowest path since it involves
  * a call to the page allocator and the setup of a new slab.
  */
-static void *__slab_alloc(struct kmem_cache *s,
-		gfp_t gfpflags, int node, void *addr, struct kmem_cache_cpu *c)
+static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+			  unsigned long addr, struct kmem_cache_cpu *c)
 {
 	void **object;
 	struct page *new;
@@ -1584,7 +1585,7 @@ debug:
  * Otherwise we can simply pick the next object from the lockless free list.
  */
 static __always_inline void *slab_alloc(struct kmem_cache *s,
-		gfp_t gfpflags, int node, void *addr)
+		gfp_t gfpflags, int node, unsigned long addr)
 {
 	void **object;
 	struct kmem_cache_cpu *c;
@@ -1613,18 +1614,46 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
+	void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+			     s->objsize, s->size, gfpflags);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+{
+	return slab_alloc(s, gfpflags, -1, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+#endif
+
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
+	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+				  s->objsize, s->size, gfpflags, node);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+				    gfp_t gfpflags,
+				    int node)
+{
+	return slab_alloc(s, gfpflags, node, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+#endif
+
 /*
  * Slow patch handling. This may still be called frequently since objects
  * have a longer lifetime than the cpu slabs in most processing loads.
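The _notrace variants added above let callers that emit their own kmemtrace events reach the allocator without logging the allocation twice; presumably the inline kmalloc() fast paths in slub_def.h are the intended users. A hedged userspace sketch of that wrapper shape (my_kmalloc, cache_alloc_notrace and trace_event are illustrative names, not from the patch):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for a _notrace allocation entry point: no event emitted here. */
static void *cache_alloc_notrace(size_t size)
{
	return malloc(size);
}

static void trace_event(const char *kind, void *ptr, size_t size)
{
	printf("trace: %s ptr=%p size=%zu\n", kind, ptr, size);
}

/*
 * A trace-aware wrapper emits exactly one event itself, so it must call
 * the _notrace allocation path; calling the traced entry point here would
 * record the same allocation twice.
 */
static void *my_kmalloc(size_t size)
{
	void *ret = cache_alloc_notrace(size);

	trace_event("kmalloc", ret, size);
	return ret;
}

int main(void)
{
	void *p = my_kmalloc(64);

	free(p);
	return 0;
}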
@@ -1634,7 +1663,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
  * handling required then we can return immediately.
  */
 static void __slab_free(struct kmem_cache *s, struct page *page,
-			void *x, void *addr, unsigned int offset)
+			void *x, unsigned long addr, unsigned int offset)
 {
 	void *prior;
 	void **object = (void *)x;
@@ -1704,7 +1733,7 @@ debug:
  * with all sorts of special processing.
  */
 static __always_inline void slab_free(struct kmem_cache *s,
-			struct page *page, void *x, void *addr)
+			struct page *page, void *x, unsigned long addr)
 {
 	void **object = (void *)x;
 	struct kmem_cache_cpu *c;
@@ -1731,7 +1760,9 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 
 	page = virt_to_head_page(x);
 
-	slab_free(s, page, x, __builtin_return_address(0));
+	slab_free(s, page, x, _RET_IP_);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
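kmemtrace_mark_free() records the pointer being released, so a tracer can pair it with the earlier alloc event and flag unmatched allocations. A toy sketch of that pairing idea (illustrative only, not the kmemtrace userspace tooling):

#include <stdio.h>

#define MAX_LIVE 16

/* Toy event log: track live allocations by pointer value. */
static unsigned long live[MAX_LIVE];

static void mark_alloc(unsigned long ptr)
{
	for (int i = 0; i < MAX_LIVE; i++)
		if (!live[i]) { live[i] = ptr; return; }
}

static void mark_free(unsigned long ptr)
{
	for (int i = 0; i < MAX_LIVE; i++)
		if (live[i] == ptr) { live[i] = 0; return; }
}

int main(void)
{
	mark_alloc(0x1000);
	mark_alloc(0x2000);
	mark_free(0x1000);

	/* Anything still live at the end is a candidate leak. */
	for (int i = 0; i < MAX_LIVE; i++)
		if (live[i])
			printf("leaked: %#lx\n", live[i]);
	return 0;
}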
@@ -2650,6 +2681,7 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s;
+	void *ret;
 
 	if (unlikely(size > PAGE_SIZE))
 		return kmalloc_large(size, flags);
@@ -2659,7 +2691,12 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, -1, __builtin_return_address(0));
+	ret = slab_alloc(s, flags, -1, _RET_IP_);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+			     size, s->size, flags);
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2678,16 +2715,30 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *s;
+	void *ret;
 
-	if (unlikely(size > PAGE_SIZE))
-		return kmalloc_large_node(size, flags, node);
+	if (unlikely(size > PAGE_SIZE)) {
+		ret = kmalloc_large_node(size, flags, node);
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _RET_IP_, ret,
+					  size, PAGE_SIZE << get_order(size),
+					  flags, node);
+
+		return ret;
+	}
 
 	s = get_slab(size, flags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, node, __builtin_return_address(0));
+	ret = slab_alloc(s, flags, node, _RET_IP_);
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+				  size, s->size, flags, node);
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
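For requests above PAGE_SIZE the event records PAGE_SIZE << get_order(size), the size the page allocator actually hands out, rather than the requested size, since page-level allocations round up to a power-of-two number of pages. A userspace sketch of that rounding, assuming 4 KiB pages and reimplementing get_order() for illustration:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)	/* 4096, assumed for illustration */

/* Minimal reimplementation of the kernel's get_order(): smallest order
 * such that (PAGE_SIZE << order) >= size. */
static int get_order(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned long requested = 5000;	/* just over one page */
	unsigned long traced = PAGE_SIZE << get_order(requested);

	/* 5000 bytes requested, 8192 bytes actually allocated and traced. */
	printf("requested=%lu traced=%lu\n", requested, traced);
	return 0;
}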
@@ -2744,7 +2795,9 @@ void kfree(const void *x)
 		put_page(page);
 		return;
 	}
-	slab_free(page->slab, page, object, __builtin_return_address(0));
+	slab_free(page->slab, page, object, _RET_IP_);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -3202,9 +3255,10 @@ static struct notifier_block __cpuinitdata slab_notifier = {
 
 #endif
 
-void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
+void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
 	struct kmem_cache *s;
+	void *ret;
 
 	if (unlikely(size > PAGE_SIZE))
 		return kmalloc_large(size, gfpflags);
@@ -3214,13 +3268,20 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, gfpflags, -1, caller);
+	ret = slab_alloc(s, gfpflags, -1, caller);
+
+	/* Honor the call site pointer we recieved. */
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, caller, ret, size,
+			     s->size, gfpflags);
+
+	return ret;
 }
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
-					int node, void *caller)
+					int node, unsigned long caller)
 {
 	struct kmem_cache *s;
+	void *ret;
 
 	if (unlikely(size > PAGE_SIZE))
 		return kmalloc_large_node(size, gfpflags, node);
@@ -3230,7 +3291,13 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, gfpflags, node, caller);
+	ret = slab_alloc(s, gfpflags, node, caller);
+
+	/* Honor the call site pointer we recieved. */
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, caller, ret,
+				  size, s->size, gfpflags, node);
+
+	return ret;
 }
 
 #ifdef CONFIG_SLUB_DEBUG
@@ -3429,7 +3496,7 @@ static void resiliency_test(void) {};
 
 struct location {
 	unsigned long count;
-	void *addr;
+	unsigned long addr;
 	long long sum_time;
 	long min_time;
 	long max_time;
@@ -3477,7 +3544,7 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 {
 	long start, end, pos;
 	struct location *l;
-	void *caddr;
+	unsigned long caddr;
 	unsigned long age = jiffies - track->when;
 
 	start = -1;
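The closing hunks move the SLUB_DEBUG call-site statistics (struct location, add_location()) to the same unsigned long representation, so call sites group and compare as plain integers. A compact illustrative sketch of counting hits per integer call-site address (not the kernel's add_location(), which also tracks timing info):

#include <stdio.h>

struct location {
	unsigned long count;
	unsigned long addr;	/* call site, now an integer as in the patch */
};

/* Count hits per call site; integer equality replaces pointer equality. */
static void add_location(struct location *locs, int n, unsigned long caddr)
{
	for (int i = 0; i < n; i++) {
		if (locs[i].addr == caddr || locs[i].addr == 0) {
			locs[i].addr = caddr;
			locs[i].count++;
			return;
		}
	}
}

int main(void)
{
	struct location locs[4] = { 0 };

	add_location(locs, 4, 0xc0de);
	add_location(locs, 4, 0xc0de);
	add_location(locs, 4, 0xbeef);

	for (int i = 0; i < 4 && locs[i].addr; i++)
		printf("site %#lx: %lu hits\n", locs[i].addr, locs[i].count);
	return 0;
}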