Diffstat (limited to 'mm')
 -rw-r--r--  mm/slab.c  |  71
 -rw-r--r--  mm/slob.c  |  37
 -rw-r--r--  mm/slub.c  |  83
 3 files changed, 170 insertions(+), 21 deletions(-)
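Every hunk below follows the same pattern: each allocation fast path captures its return value and emits a kmemtrace marker carrying the call site, the returned pointer, the requested size, the size the allocator actually set aside, and the GFP flags (plus the NUMA node for the _node variants), while each free path emits a matching marker keyed only by the pointer. As a rough illustration of that pattern, here is a hypothetical userspace sketch; trace_alloc(), trace_free(), traced_malloc() and the 64-byte rounding are made-up stand-ins, not kernel or kmemtrace APIs.

/*
 * Hypothetical userspace sketch of the hook pattern this patch applies
 * inside the kernel allocators: record (call site, pointer, requested
 * bytes, actually allocated bytes) on every allocation, and the pointer
 * again on every free, so a tracer can pair events and measure slack.
 * All names here are illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>

static void trace_alloc(void *call_site, void *ptr,
                        size_t bytes_req, size_t bytes_alloc)
{
        printf("alloc site=%p ptr=%p req=%zu alloc=%zu\n",
               call_site, ptr, bytes_req, bytes_alloc);
}

static void trace_free(void *call_site, void *ptr)
{
        printf("free  site=%p ptr=%p\n", call_site, ptr);
}

/* Round up to a 64-byte "slab object"; stands in for cachep->buffer_size. */
static size_t object_size(size_t size)
{
        return (size + 63) & ~(size_t)63;
}

static void *traced_malloc(size_t size)
{
        void *ret = malloc(object_size(size));

        /* Emit the marker after the allocation, as the patch does. */
        trace_alloc(__builtin_return_address(0), ret, size, object_size(size));
        return ret;
}

static void traced_free(void *ptr)
{
        free(ptr);
        trace_free(__builtin_return_address(0), ptr);
}

int main(void)
{
        void *p = traced_malloc(100);   /* req=100, alloc=128 */

        traced_free(p);
        return 0;
}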
diff --git a/mm/slab.c b/mm/slab.c
index ddc41f337d58..dae716b32915 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -102,6 +102,7 @@
 #include <linux/cpu.h>
 #include <linux/sysctl.h>
 #include <linux/module.h>
+#include <trace/kmemtrace.h>
 #include <linux/rcupdate.h>
 #include <linux/string.h>
 #include <linux/uaccess.h>
@@ -568,6 +569,14 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 
 #endif
 
+#ifdef CONFIG_KMEMTRACE
+size_t slab_buffer_size(struct kmem_cache *cachep)
+{
+        return cachep->buffer_size;
+}
+EXPORT_SYMBOL(slab_buffer_size);
+#endif
+
 /*
  * Do not go above this order unless 0 objects fit into the slab.
  */
@@ -3550,10 +3559,23 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
  */
 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
-        return __cache_alloc(cachep, flags, __builtin_return_address(0));
+        void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+
+        kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+                             obj_size(cachep), cachep->buffer_size, flags);
+
+        return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+{
+        return __cache_alloc(cachep, flags, __builtin_return_address(0));
+}
+EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+#endif
+
 /**
  * kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
  * @cachep: the cache we're checking against
@@ -3598,23 +3620,47 @@ out:
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
-        return __cache_alloc_node(cachep, flags, nodeid,
-                        __builtin_return_address(0));
+        void *ret = __cache_alloc_node(cachep, flags, nodeid,
+                                       __builtin_return_address(0));
+
+        kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+                                  obj_size(cachep), cachep->buffer_size,
+                                  flags, nodeid);
+
+        return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+                                    gfp_t flags,
+                                    int nodeid)
+{
+        return __cache_alloc_node(cachep, flags, nodeid,
+                                  __builtin_return_address(0));
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+#endif
+
 static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 {
         struct kmem_cache *cachep;
+        void *ret;
 
         cachep = kmem_find_general_cachep(size, flags);
         if (unlikely(ZERO_OR_NULL_PTR(cachep)))
                 return cachep;
-        return kmem_cache_alloc_node(cachep, flags, node);
+        ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
+
+        kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+                                  (unsigned long) caller, ret,
+                                  size, cachep->buffer_size, flags, node);
+
+        return ret;
 }
 
-#ifdef CONFIG_DEBUG_SLAB
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
         return __do_kmalloc_node(size, flags, node,
@@ -3647,6 +3693,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
                                           void *caller)
 {
         struct kmem_cache *cachep;
+        void *ret;
 
         /* If you want to save a few bytes .text space: replace
          * __ with kmem_.
@@ -3656,11 +3703,17 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
         cachep = __find_general_cachep(size, flags);
         if (unlikely(ZERO_OR_NULL_PTR(cachep)))
                 return cachep;
-        return __cache_alloc(cachep, flags, caller);
+        ret = __cache_alloc(cachep, flags, caller);
+
+        kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
+                             (unsigned long) caller, ret,
+                             size, cachep->buffer_size, flags);
+
+        return ret;
 }
 
 
-#ifdef CONFIG_DEBUG_SLAB
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
 void *__kmalloc(size_t size, gfp_t flags)
 {
         return __do_kmalloc(size, flags, __builtin_return_address(0));
@@ -3699,6 +3752,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
         debug_check_no_obj_freed(objp, obj_size(cachep));
         __cache_free(cachep, objp);
         local_irq_restore(flags);
+
+        kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, objp);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -3725,6 +3780,8 @@ void kfree(const void *objp)
         debug_check_no_obj_freed(objp, obj_size(c));
         __cache_free(c, (void *)objp);
         local_irq_restore(flags);
+
+        kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, objp);
 }
 EXPORT_SYMBOL(kfree);
 
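A point worth noting in the slab.c changes: __do_kmalloc_node() now calls kmem_cache_alloc_node_notrace() rather than kmem_cache_alloc_node(), and the _notrace entry points plus slab_buffer_size() are exported under CONFIG_KMEMTRACE. The _notrace variants exist so that a wrapper which emits its own KMALLOC event does not also go through a traced entry point and trigger a second CACHE event for the same allocation; the inlined kmalloc() fast paths in the headers sit outside this mm/-only diff, but presumably rely on the same helpers. The toy sketch below, with hypothetical names, shows the double-accounting problem the _notrace variants avoid.

/*
 * Minimal sketch (hypothetical names) of why the _notrace variants exist:
 * a wrapper that emits its own event must reach the inner allocator
 * without going through the traced entry point, or the same allocation
 * would be reported twice.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

static void *inner_alloc(size_t size)           /* stands in for __cache_alloc() */
{
        return malloc(size);
}

void *cache_alloc(size_t size)                  /* traced entry: logs a CACHE event */
{
        void *ret = inner_alloc(size);

        printf("CACHE   alloc ptr=%p size=%zu\n", ret, size);
        return ret;
}

void *cache_alloc_notrace(size_t size)          /* same allocation, no event */
{
        return inner_alloc(size);
}

void *my_kmalloc(size_t size)                   /* logs a single KMALLOC event */
{
        void *ret = cache_alloc_notrace(size);  /* not cache_alloc(): avoid double logging */

        printf("KMALLOC alloc ptr=%p size=%zu\n", ret, size);
        return ret;
}

int main(void)
{
        free(my_kmalloc(64));                   /* exactly one event for this allocation */
        return 0;
}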
diff --git a/mm/slob.c b/mm/slob.c
index bf7e8fc3aed8..4d1c0fc33b6b 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -65,6 +65,7 @@
 #include <linux/module.h>
 #include <linux/rcupdate.h>
 #include <linux/list.h>
+#include <trace/kmemtrace.h>
 #include <asm/atomic.h>
 
 /*
@@ -463,27 +464,38 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 {
         unsigned int *m;
         int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+        void *ret;
 
         if (size < PAGE_SIZE - align) {
                 if (!size)
                         return ZERO_SIZE_PTR;
 
                 m = slob_alloc(size + align, gfp, align, node);
+
                 if (!m)
                         return NULL;
                 *m = size;
-                return (void *)m + align;
+                ret = (void *)m + align;
+
+                kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+                                          _RET_IP_, ret,
+                                          size, size + align, gfp, node);
         } else {
-                void *ret;
+                unsigned int order = get_order(size);
 
-                ret = slob_new_page(gfp | __GFP_COMP, get_order(size), node);
+                ret = slob_new_page(gfp | __GFP_COMP, order, node);
                 if (ret) {
                         struct page *page;
                         page = virt_to_page(ret);
                         page->private = size;
                 }
-                return ret;
+
+                kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+                                          _RET_IP_, ret,
+                                          size, PAGE_SIZE << order, gfp, node);
         }
+
+        return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 
@@ -501,6 +513,8 @@ void kfree(const void *block)
                 slob_free(m, *m + align);
         } else
                 put_page(&sp->page);
+
+        kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, block);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -569,10 +583,19 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
         void *b;
 
-        if (c->size < PAGE_SIZE)
+        if (c->size < PAGE_SIZE) {
                 b = slob_alloc(c->size, flags, c->align, node);
-        else
+                kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
+                                          _RET_IP_, b, c->size,
+                                          SLOB_UNITS(c->size) * SLOB_UNIT,
+                                          flags, node);
+        } else {
                 b = slob_new_page(flags, get_order(c->size), node);
+                kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
+                                          _RET_IP_, b, c->size,
+                                          PAGE_SIZE << get_order(c->size),
+                                          flags, node);
+        }
 
         if (c->ctor)
                 c->ctor(b);
@@ -608,6 +631,8 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
         } else {
                 __kmem_cache_free(b, c->size);
         }
+
+        kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, b);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
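SLOB keeps no per-cache buffer_size, so each call site in slob.c computes the allocated size by hand: size + align for a small kmalloc (the size header is prepended to the object), PAGE_SIZE << order for a page-backed one, and SLOB_UNITS(c->size) * SLOB_UNIT or PAGE_SIZE << get_order(c->size) for cache objects. The gap between the requested and allocated figures is the per-object overhead a tracer can report. The arithmetic below is an illustrative userspace rendering of those three cases; PAGE_SIZE, SLOB_UNIT and get_order() are assumed values and reimplementations, not the kernel's.

/*
 * Illustrative-only arithmetic for the bytes_alloc values slob reports.
 * PAGE_SIZE, SLOB_UNIT and get_order() below are assumptions (4096 bytes,
 * 2 bytes, round up to the next power-of-two page count); the real values
 * depend on architecture and configuration.
 */
#include <stdio.h>

#define PAGE_SIZE       4096UL
#define SLOB_UNIT       2UL     /* assumed size of one slob unit */

static unsigned int get_order(unsigned long size)
{
        unsigned int order = 0;

        while ((PAGE_SIZE << order) < size)
                order++;
        return order;
}

static unsigned long slob_units(unsigned long size)
{
        return (size + SLOB_UNIT - 1) / SLOB_UNIT;      /* mirrors SLOB_UNITS() */
}

int main(void)
{
        unsigned long size = 100, align = 8;

        /* small kmalloc: an 'align'-byte size header precedes the object */
        printf("kmalloc(%lu): req=%lu alloc=%lu\n", size, size, size + align);

        /* large kmalloc: whole pages, rounded up to a power-of-two order */
        size = 5000;
        printf("kmalloc(%lu): req=%lu alloc=%lu\n",
               size, size, PAGE_SIZE << get_order(size));

        /* small cache object: rounded up to whole SLOB units */
        size = 101;
        printf("kmem_cache_alloc(%lu): req=%lu alloc=%lu\n",
               size, size, slob_units(size) * SLOB_UNIT);
        return 0;
}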
diff --git a/mm/slub.c b/mm/slub.c
index 6392ae5cc6b1..f657c88814ee 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -16,6 +16,7 @@
 #include <linux/slab.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <trace/kmemtrace.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
 #include <linux/mempolicy.h>
@@ -1623,18 +1624,46 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-        return slab_alloc(s, gfpflags, -1, _RET_IP_);
+        void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
+
+        kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+                             s->objsize, s->size, gfpflags);
+
+        return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+{
+        return slab_alloc(s, gfpflags, -1, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+#endif
+
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-        return slab_alloc(s, gfpflags, node, _RET_IP_);
+        void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+
+        kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+                                  s->objsize, s->size, gfpflags, node);
+
+        return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+                                    gfp_t gfpflags,
+                                    int node)
+{
+        return slab_alloc(s, gfpflags, node, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+#endif
+
 /*
  * Slow patch handling. This may still be called frequently since objects
  * have a longer lifetime than the cpu slabs in most processing loads.
@@ -1742,6 +1771,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
         page = virt_to_head_page(x);
 
         slab_free(s, page, x, _RET_IP_);
+
+        kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -2657,6 +2688,7 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 void *__kmalloc(size_t size, gfp_t flags)
 {
         struct kmem_cache *s;
+        void *ret;
 
         if (unlikely(size > PAGE_SIZE))
                 return kmalloc_large(size, flags);
@@ -2666,7 +2698,12 @@ void *__kmalloc(size_t size, gfp_t flags)
         if (unlikely(ZERO_OR_NULL_PTR(s)))
                 return s;
 
-        return slab_alloc(s, flags, -1, _RET_IP_);
+        ret = slab_alloc(s, flags, -1, _RET_IP_);
+
+        kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+                             size, s->size, flags);
+
+        return ret;
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2685,16 +2722,30 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
         struct kmem_cache *s;
+        void *ret;
 
-        if (unlikely(size > PAGE_SIZE))
-                return kmalloc_large_node(size, flags, node);
+        if (unlikely(size > PAGE_SIZE)) {
+                ret = kmalloc_large_node(size, flags, node);
+
+                kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+                                          _RET_IP_, ret,
+                                          size, PAGE_SIZE << get_order(size),
+                                          flags, node);
+
+                return ret;
+        }
 
         s = get_slab(size, flags);
 
         if (unlikely(ZERO_OR_NULL_PTR(s)))
                 return s;
 
-        return slab_alloc(s, flags, node, _RET_IP_);
+        ret = slab_alloc(s, flags, node, _RET_IP_);
+
+        kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+                                  size, s->size, flags, node);
+
+        return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2752,6 +2803,8 @@ void kfree(const void *x)
                 return;
         }
         slab_free(page->slab, page, object, _RET_IP_);
+
+        kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -3221,6 +3274,7 @@ static struct notifier_block __cpuinitdata slab_notifier = {
 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
         struct kmem_cache *s;
+        void *ret;
 
         if (unlikely(size > PAGE_SIZE))
                 return kmalloc_large(size, gfpflags);
@@ -3230,13 +3284,20 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
         if (unlikely(ZERO_OR_NULL_PTR(s)))
                 return s;
 
-        return slab_alloc(s, gfpflags, -1, caller);
+        ret = slab_alloc(s, gfpflags, -1, caller);
+
+        /* Honor the call site pointer we received. */
+        kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, caller, ret, size,
+                             s->size, gfpflags);
+
+        return ret;
 }
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
                                   int node, unsigned long caller)
 {
         struct kmem_cache *s;
+        void *ret;
 
         if (unlikely(size > PAGE_SIZE))
                 return kmalloc_large_node(size, gfpflags, node);
@@ -3246,7 +3307,13 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
         if (unlikely(ZERO_OR_NULL_PTR(s)))
                 return s;
 
-        return slab_alloc(s, gfpflags, node, caller);
+        ret = slab_alloc(s, gfpflags, node, caller);
+
+        /* Honor the call site pointer we received. */
+        kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, caller, ret,
+                                  size, s->size, gfpflags, node);
+
+        return ret;
 }
 
 #ifdef CONFIG_SLUB_DEBUG
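On the free side, every path in all three allocators gains a single kmemtrace_mark_free(type, _RET_IP_, ptr) call after the object has been handed back. Only the pointer identifies the object, so a consumer pairs each free event with the most recent allocation event it saw for the same address; whatever never gets paired is a leak candidate, and a free with no live match hints at a double free. The snippet below is a toy userspace model of that pairing, not the actual kmemtrace userland tooling.

/*
 * Toy model of a kmemtrace consumer: since the free-side markers report
 * only (type, call site, pointer), events are matched against the latest
 * allocation recorded for the same pointer. Illustrative names only.
 */
#include <stdio.h>
#include <string.h>

struct live_obj {
        const void      *ptr;
        unsigned long   bytes_req;
        unsigned long   bytes_alloc;
};

static struct live_obj live[64];

static void on_alloc(const void *ptr, unsigned long req, unsigned long alloc)
{
        for (int i = 0; i < 64; i++)
                if (!live[i].ptr) {
                        live[i] = (struct live_obj){ ptr, req, alloc };
                        return;
                }
}

static void on_free(const void *ptr)
{
        for (int i = 0; i < 64; i++)
                if (live[i].ptr == ptr) {
                        memset(&live[i], 0, sizeof(live[i]));
                        return;
                }
        printf("free of untracked pointer %p\n", ptr);  /* possible double free */
}

int main(void)
{
        on_alloc((void *)0x1000, 100, 128);
        on_free((void *)0x1000);

        /* anything still in live[] at the end is a leak candidate */
        for (int i = 0; i < 64; i++)
                if (live[i].ptr)
                        printf("leak? ptr=%p req=%lu alloc=%lu\n",
                               live[i].ptr, live[i].bytes_req, live[i].bytes_alloc);
        return 0;
}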