Diffstat (limited to 'mm')
 mm/slab.c | 71
 mm/slob.c | 35
 mm/slub.c | 83
 3 files changed, 169 insertions(+), 20 deletions(-)
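
All three allocators are instrumented with the same pattern: each public
allocation entry point captures the pointer returned by the existing
allocation path, reports it to kmemtrace together with the call site, the
requested size, the size actually reserved, and the gfp flags (plus the NUMA
node in the _node variants), and then returns the pointer unchanged; each
free path reports the pointer being released. A minimal sketch of that
shape, assembled from the calls visible in the diff (my_cache_alloc and the
SLUB-style fields are illustrative, not part of the patch):

	void *my_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
	{
		/* 1. perform the real allocation, keeping the result */
		void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);

		/* 2. report call site, object, requested and reserved size, flags */
		kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
				     s->objsize, s->size, gfpflags);

		/* 3. hand the object back unchanged */
		return ret;
	}
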
diff --git a/mm/slab.c b/mm/slab.c
index d7d1414a5285..4fc1761c6dc8 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -102,6 +102,7 @@
 #include <linux/cpu.h>
 #include <linux/sysctl.h>
 #include <linux/module.h>
+#include <trace/kmemtrace.h>
 #include <linux/rcupdate.h>
 #include <linux/string.h>
 #include <linux/uaccess.h>
@@ -568,6 +569,14 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 
 #endif
 
+#ifdef CONFIG_KMEMTRACE
+size_t slab_buffer_size(struct kmem_cache *cachep)
+{
+	return cachep->buffer_size;
+}
+EXPORT_SYMBOL(slab_buffer_size);
+#endif
+
 /*
  * Do not go above this order unless 0 objects fit into the slab.
  */
@@ -3554,10 +3563,23 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
  */
 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
-	return __cache_alloc(cachep, flags, __builtin_return_address(0));
+	void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+			     obj_size(cachep), cachep->buffer_size, flags);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+{
+	return __cache_alloc(cachep, flags, __builtin_return_address(0));
+}
+EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+#endif
+
 /**
  * kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
  * @cachep: the cache we're checking against
@@ -3602,23 +3624,47 @@ out:
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
-	return __cache_alloc_node(cachep, flags, nodeid,
-				  __builtin_return_address(0));
+	void *ret = __cache_alloc_node(cachep, flags, nodeid,
+				       __builtin_return_address(0));
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+				  obj_size(cachep), cachep->buffer_size,
+				  flags, nodeid);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+				    gfp_t flags,
+				    int nodeid)
+{
+	return __cache_alloc_node(cachep, flags, nodeid,
+				  __builtin_return_address(0));
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+#endif
+
 static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 {
 	struct kmem_cache *cachep;
+	void *ret;
 
 	cachep = kmem_find_general_cachep(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	return kmem_cache_alloc_node(cachep, flags, node);
+	ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+				  (unsigned long) caller, ret,
+				  size, cachep->buffer_size, flags, node);
+
+	return ret;
 }
 
-#ifdef CONFIG_DEBUG_SLAB
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	return __do_kmalloc_node(size, flags, node,
@@ -3651,6 +3697,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 					  void *caller)
 {
 	struct kmem_cache *cachep;
+	void *ret;
 
 	/* If you want to save a few bytes .text space: replace
 	 * __ with kmem_.
@@ -3660,11 +3707,17 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 	cachep = __find_general_cachep(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	return __cache_alloc(cachep, flags, caller);
+	ret = __cache_alloc(cachep, flags, caller);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
+			     (unsigned long) caller, ret,
+			     size, cachep->buffer_size, flags);
+
+	return ret;
 }
 
 
-#ifdef CONFIG_DEBUG_SLAB
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	return __do_kmalloc(size, flags, __builtin_return_address(0));
@@ -3703,6 +3756,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 	debug_check_no_obj_freed(objp, obj_size(cachep));
 	__cache_free(cachep, objp);
 	local_irq_restore(flags);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, objp);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -3729,6 +3784,8 @@ void kfree(const void *objp)
 	debug_check_no_obj_freed(objp, obj_size(c));
 	__cache_free(c, (void *)objp);
 	local_irq_restore(flags);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, objp);
 }
 EXPORT_SYMBOL(kfree);
 
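Besides the hooks, slab.c gains kmem_cache_alloc_notrace(),
kmem_cache_alloc_node_notrace() and an exported slab_buffer_size(). A
plausible use, sketched below under stated assumptions: an inline kmalloc()
wrapper in a header can allocate through the _notrace variant so the event
is emitted exactly once, at the wrapper, and is attributed to the wrapper's
caller rather than to slab-internal code (kmalloc_sketch and
kmalloc_slab_lookup are hypothetical names for illustration):

	static __always_inline void *kmalloc_sketch(size_t size, gfp_t flags)
	{
		/* hypothetical size-to-cache lookup */
		struct kmem_cache *cachep = kmalloc_slab_lookup(size);
		/* allocate without emitting an event inside the allocator */
		void *ret = kmem_cache_alloc_notrace(cachep, flags);

		/* one event, attributed to this wrapper's caller */
		kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
				     size, slab_buffer_size(cachep), flags);
		return ret;
	}
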
diff --git a/mm/slob.c b/mm/slob.c
index 7a3411524dac..4dd6516447f2 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -65,6 +65,7 @@
 #include <linux/module.h>
 #include <linux/rcupdate.h>
 #include <linux/list.h>
+#include <trace/kmemtrace.h>
 #include <asm/atomic.h>
 
 /*
@@ -474,6 +475,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 {
 	unsigned int *m;
 	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+	void *ret;
 
 	lockdep_trace_alloc(gfp);
 
@@ -482,12 +484,17 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 			return ZERO_SIZE_PTR;
 
 		m = slob_alloc(size + align, gfp, align, node);
+
 		if (!m)
 			return NULL;
 		*m = size;
-		return (void *)m + align;
+		ret = (void *)m + align;
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _RET_IP_, ret,
+					  size, size + align, gfp, node);
 	} else {
-		void *ret;
+		unsigned int order = get_order(size);
 
 		ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
 		if (ret) {
@@ -495,8 +502,13 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 			page = virt_to_page(ret);
 			page->private = size;
 		}
-		return ret;
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _RET_IP_, ret,
+					  size, PAGE_SIZE << order, gfp, node);
 	}
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 
@@ -514,6 +526,8 @@ void kfree(const void *block)
 		slob_free(m, *m + align);
 	} else
 		put_page(&sp->page);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, block);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -583,10 +597,19 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
 	void *b;
 
-	if (c->size < PAGE_SIZE)
+	if (c->size < PAGE_SIZE) {
 		b = slob_alloc(c->size, flags, c->align, node);
-	else
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
+					  _RET_IP_, b, c->size,
+					  SLOB_UNITS(c->size) * SLOB_UNIT,
+					  flags, node);
+	} else {
 		b = slob_new_pages(flags, get_order(c->size), node);
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
+					  _RET_IP_, b, c->size,
+					  PAGE_SIZE << get_order(c->size),
+					  flags, node);
+	}
 
 	if (c->ctor)
 		c->ctor(b);
@@ -622,6 +645,8 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
 	} else {
 		__kmem_cache_free(b, c->size);
 	}
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, b);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
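Note the two sizes reported on SLOB's small-object path: the request size
and size + align, because SLOB keeps the request size in an align-sized
header placed just before the payload (the page path reports
PAGE_SIZE << order instead). A sketch of that header arithmetic, mirroring
the kfree() hunk above (kfree_sketch is illustrative only; the layout is the
one implied by the diff):

	void kfree_sketch(const void *block, int align)
	{
		/* step back over the header that slob_alloc() reserved */
		unsigned int *m = (unsigned int *)((char *)block - align);

		/* *m holds the request size; free payload plus header */
		slob_free(m, *m + align);
	}
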
diff --git a/mm/slub.c b/mm/slub.c
index c4ea9158c9fb..7aaa121d0ea9 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -16,6 +16,7 @@
 #include <linux/slab.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <trace/kmemtrace.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
 #include <linux/mempolicy.h>
@@ -1618,18 +1619,46 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return slab_alloc(s, gfpflags, -1, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+			     s->objsize, s->size, gfpflags);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+{
+	return slab_alloc(s, gfpflags, -1, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+#endif
+
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	return slab_alloc(s, gfpflags, node, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+				  s->objsize, s->size, gfpflags, node);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+				    gfp_t gfpflags,
+				    int node)
+{
+	return slab_alloc(s, gfpflags, node, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+#endif
+
 /*
  * Slow path handling. This may still be called frequently since objects
  * have a longer lifetime than the cpu slabs in most processing loads.
@@ -1737,6 +1766,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 	page = virt_to_head_page(x);
 
 	slab_free(s, page, x, _RET_IP_);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -2659,6 +2690,7 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s;
+	void *ret;
 
 	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, flags);
@@ -2668,7 +2700,12 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, -1, _RET_IP_);
+	ret = slab_alloc(s, flags, -1, _RET_IP_);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+			     size, s->size, flags);
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2687,16 +2724,30 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *s;
+	void *ret;
 
-	if (unlikely(size > SLUB_MAX_SIZE))
-		return kmalloc_large_node(size, flags, node);
+	if (unlikely(size > SLUB_MAX_SIZE)) {
+		ret = kmalloc_large_node(size, flags, node);
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _RET_IP_, ret,
+					  size, PAGE_SIZE << get_order(size),
+					  flags, node);
+
+		return ret;
+	}
 
 	s = get_slab(size, flags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, node, _RET_IP_);
+	ret = slab_alloc(s, flags, node, _RET_IP_);
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+				  size, s->size, flags, node);
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2755,6 +2806,8 @@ void kfree(const void *x)
 		return;
 	}
 	slab_free(page->slab, page, object, _RET_IP_);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -3224,6 +3277,7 @@ static struct notifier_block __cpuinitdata slab_notifier = {
 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
 	struct kmem_cache *s;
+	void *ret;
 
 	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, gfpflags);
@@ -3233,13 +3287,20 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, gfpflags, -1, caller);
+	ret = slab_alloc(s, gfpflags, -1, caller);
+
+	/* Honor the call site pointer we received. */
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, caller, ret, size,
+			     s->size, gfpflags);
+
+	return ret;
 }
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 				  int node, unsigned long caller)
 {
 	struct kmem_cache *s;
+	void *ret;
 
 	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large_node(size, gfpflags, node);
@@ -3249,7 +3310,13 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, gfpflags, node, caller);
+	ret = slab_alloc(s, gfpflags, node, caller);
+
+	/* Honor the call site pointer we received. */
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, caller, ret,
+				  size, s->size, gfpflags, node);
+
+	return ret;
 }
 
 #ifdef CONFIG_SLUB_DEBUG
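
For reference, the shape of the marking API as it can be inferred from the
call sites above; the authoritative declarations live in
<trace/kmemtrace.h> and may differ in detail:

	void kmemtrace_mark_alloc(enum kmemtrace_type_id type_id,
				  unsigned long call_site, const void *ptr,
				  size_t bytes_req, size_t bytes_alloc,
				  gfp_t gfp_flags);
	void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
				       unsigned long call_site, const void *ptr,
				       size_t bytes_req, size_t bytes_alloc,
				       gfp_t gfp_flags, int node);
	void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
				 unsigned long call_site, const void *ptr);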