Diffstat (limited to 'mm')
-rw-r--r--	mm/slab.c	71
-rw-r--r--	mm/slob.c	37
-rw-r--r--	mm/slub.c	97
3 files changed, 177 insertions, 28 deletions
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -102,6 +102,7 @@
 #include <linux/cpu.h>
 #include <linux/sysctl.h>
 #include <linux/module.h>
+#include <trace/kmemtrace.h>
 #include <linux/rcupdate.h>
 #include <linux/string.h>
 #include <linux/uaccess.h>
@@ -568,6 +569,14 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 
 #endif
 
+#ifdef CONFIG_KMEMTRACE
+size_t slab_buffer_size(struct kmem_cache *cachep)
+{
+	return cachep->buffer_size;
+}
+EXPORT_SYMBOL(slab_buffer_size);
+#endif
+
 /*
  * Do not go above this order unless 0 objects fit into the slab.
  */
@@ -3550,10 +3559,23 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
  */
 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
-	return __cache_alloc(cachep, flags, __builtin_return_address(0));
+	void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+			     obj_size(cachep), cachep->buffer_size, flags);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+{
+	return __cache_alloc(cachep, flags, __builtin_return_address(0));
+}
+EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+#endif
+
 /**
  * kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
  * @cachep: the cache we're checking against
@@ -3598,23 +3620,47 @@ out:
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
-	return __cache_alloc_node(cachep, flags, nodeid,
-			__builtin_return_address(0));
+	void *ret = __cache_alloc_node(cachep, flags, nodeid,
+				       __builtin_return_address(0));
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+				  obj_size(cachep), cachep->buffer_size,
+				  flags, nodeid);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+				    gfp_t flags,
+				    int nodeid)
+{
+	return __cache_alloc_node(cachep, flags, nodeid,
+				  __builtin_return_address(0));
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+#endif
+
 static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 {
 	struct kmem_cache *cachep;
+	void *ret;
 
 	cachep = kmem_find_general_cachep(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	return kmem_cache_alloc_node(cachep, flags, node);
+	ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+				  (unsigned long) caller, ret,
+				  size, cachep->buffer_size, flags, node);
+
+	return ret;
 }
 
-#ifdef CONFIG_DEBUG_SLAB
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	return __do_kmalloc_node(size, flags, node,
@@ -3647,6 +3693,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 					  void *caller)
 {
 	struct kmem_cache *cachep;
+	void *ret;
 
 	/* If you want to save a few bytes .text space: replace
 	 * __ with kmem_.
@@ -3656,11 +3703,17 @@
 	cachep = __find_general_cachep(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	return __cache_alloc(cachep, flags, caller);
+	ret = __cache_alloc(cachep, flags, caller);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
+			     (unsigned long) caller, ret,
+			     size, cachep->buffer_size, flags);
+
+	return ret;
 }
 
 
-#ifdef CONFIG_DEBUG_SLAB
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	return __do_kmalloc(size, flags, __builtin_return_address(0));
@@ -3699,6 +3752,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 	debug_check_no_obj_freed(objp, obj_size(cachep));
 	__cache_free(cachep, objp);
 	local_irq_restore(flags);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, objp);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -3725,6 +3780,8 @@ void kfree(const void *objp)
 	debug_check_no_obj_freed(objp, obj_size(c));
 	__cache_free(c, (void *)objp);
 	local_irq_restore(flags);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, objp);
 }
 EXPORT_SYMBOL(kfree);
 
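Every mm/slab.c hunk above follows the same pattern, condensed in this sketch (illustrative only, not part of the patch): the traced entry point logs the call site, the returned pointer, the size the caller requested, the size the allocator actually consumed, and the GFP flags; its _notrace twin allocates without logging, so that kmalloc(), which is built on top of the cache allocator, reports each allocation exactly once (as KMEMTRACE_TYPE_KMALLOC) rather than twice.

	/* Traced entry point: allocate, then log one event. */
	void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
	{
		void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));

		kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
				     obj_size(cachep),		/* bytes requested */
				     cachep->buffer_size,	/* bytes consumed */
				     flags);
		return ret;
	}

	/* Untraced twin, compiled only under CONFIG_KMEMTRACE.
	 * __do_kmalloc_node() now calls this instead of the traced
	 * kmem_cache_alloc_node() and then logs its own
	 * KMEMTRACE_TYPE_KMALLOC event, so nothing is double-counted.
	 */
	void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
	{
		return __cache_alloc(cachep, flags, __builtin_return_address(0));
	}

On the free side only the type, call site and pointer are logged (kmemtrace_mark_free()); no sizes are needed, since an analyzer can presumably pair each free with the earlier alloc event by pointer value.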
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -65,6 +65,7 @@
 #include <linux/module.h>
 #include <linux/rcupdate.h>
 #include <linux/list.h>
+#include <trace/kmemtrace.h>
 #include <asm/atomic.h>
 
 /*
@@ -463,27 +464,38 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 {
 	unsigned int *m;
 	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+	void *ret;
 
 	if (size < PAGE_SIZE - align) {
 		if (!size)
 			return ZERO_SIZE_PTR;
 
 		m = slob_alloc(size + align, gfp, align, node);
+
 		if (!m)
 			return NULL;
 		*m = size;
-		return (void *)m + align;
+		ret = (void *)m + align;
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _RET_IP_, ret,
+					  size, size + align, gfp, node);
 	} else {
-		void *ret;
+		unsigned int order = get_order(size);
 
-		ret = slob_new_page(gfp | __GFP_COMP, get_order(size), node);
+		ret = slob_new_page(gfp | __GFP_COMP, order, node);
 		if (ret) {
 			struct page *page;
 			page = virt_to_page(ret);
 			page->private = size;
 		}
-		return ret;
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _RET_IP_, ret,
+					  size, PAGE_SIZE << order, gfp, node);
 	}
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 
@@ -501,6 +513,8 @@ void kfree(const void *block)
 		slob_free(m, *m + align);
 	} else
 		put_page(&sp->page);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, block);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -570,10 +584,19 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
 	void *b;
 
-	if (c->size < PAGE_SIZE)
+	if (c->size < PAGE_SIZE) {
 		b = slob_alloc(c->size, flags, c->align, node);
-	else
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
+					  _RET_IP_, b, c->size,
+					  SLOB_UNITS(c->size) * SLOB_UNIT,
+					  flags, node);
+	} else {
 		b = slob_new_page(flags, get_order(c->size), node);
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
+					  _RET_IP_, b, c->size,
+					  PAGE_SIZE << get_order(c->size),
+					  flags, node);
+	}
 
 	if (c->ctor)
 		c->ctor(b);
@@ -609,6 +632,8 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
 	} else {
 		__kmem_cache_free(b, c->size);
 	}
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, b);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
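In the mm/slob.c hunks, a small kmalloc() is reported as consuming size + align bytes because SLOB stores the request size in a header sitting align bytes before the pointer it hands out, and kfree() reads it back (see the kfree() hunk above). A sketch of that layout, reusing the variables from the code above (illustrative only):

	/*
	 *   m                          m + align == ret
	 *   |--- header: *m = size ----|--- caller's object: size bytes ---|
	 */
	m = slob_alloc(size + align, gfp, align, node);	/* header + payload */
	*m = size;					/* record request size */
	ret = (void *)m + align;			/* return past the header */

	/* kfree() steps back to the header to learn the block size: */
	unsigned int *m = (unsigned int *)(block - align);
	slob_free(m, *m + align);

For the page-backed path and for kmem_cache_alloc_node(), the consumed size is instead reported as PAGE_SIZE << order or SLOB_UNITS(c->size) * SLOB_UNIT, i.e. the rounded-up sizes SLOB actually hands out.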
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -16,6 +16,7 @@
 #include <linux/slab.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <trace/kmemtrace.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
 #include <linux/mempolicy.h>
@@ -1623,18 +1624,46 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return slab_alloc(s, gfpflags, -1, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+			     s->objsize, s->size, gfpflags);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+{
+	return slab_alloc(s, gfpflags, -1, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+#endif
+
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	return slab_alloc(s, gfpflags, node, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+				  s->objsize, s->size, gfpflags, node);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+				    gfp_t gfpflags,
+				    int node)
+{
+	return slab_alloc(s, gfpflags, node, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+#endif
+
 /*
  * Slow patch handling. This may still be called frequently since objects
  * have a longer lifetime than the cpu slabs in most processing loads.
@@ -1742,6 +1771,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 	page = virt_to_head_page(x);
 
 	slab_free(s, page, x, _RET_IP_);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -2475,7 +2506,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
  * Kmalloc subsystem
  *******************************************************************/
 
-struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 static int __init setup_slub_min_order(char *str)
@@ -2537,7 +2568,7 @@ panic:
 }
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
+static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT];
 
 static void sysfs_add_func(struct work_struct *w)
 {
@@ -2657,8 +2688,9 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s;
+	void *ret;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
@@ -2666,7 +2698,12 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, -1, _RET_IP_);
+	ret = slab_alloc(s, flags, -1, _RET_IP_);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+			     size, s->size, flags);
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2685,16 +2722,30 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *s;
+	void *ret;
+
+	if (unlikely(size > SLUB_MAX_SIZE)) {
+		ret = kmalloc_large_node(size, flags, node);
 
-	if (unlikely(size > PAGE_SIZE))
-		return kmalloc_large_node(size, flags, node);
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _RET_IP_, ret,
+					  size, PAGE_SIZE << get_order(size),
+					  flags, node);
+
+		return ret;
+	}
 
 	s = get_slab(size, flags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, node, _RET_IP_);
+	ret = slab_alloc(s, flags, node, _RET_IP_);
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+				  size, s->size, flags, node);
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2753,6 +2804,8 @@ void kfree(const void *x)
 		return;
 	}
 	slab_free(page->slab, page, object, _RET_IP_);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -2986,7 +3039,7 @@ void __init kmem_cache_init(void)
 		caches++;
 	}
 
-	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
 		create_kmalloc_cache(&kmalloc_caches[i],
 			"kmalloc", 1 << i, GFP_KERNEL);
 		caches++;
@@ -3023,7 +3076,7 @@ void __init kmem_cache_init(void)
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
 		kmalloc_caches[i].name =
 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
 
@@ -3222,8 +3275,9 @@ static struct notifier_block __cpuinitdata slab_notifier = {
 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
 	struct kmem_cache *s;
+	void *ret;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, gfpflags);
 
 	s = get_slab(size, gfpflags);
@@ -3231,15 +3285,22 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, gfpflags, -1, caller);
+	ret = slab_alloc(s, gfpflags, -1, caller);
+
+	/* Honor the call site pointer we received. */
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, caller, ret, size,
+			     s->size, gfpflags);
+
+	return ret;
 }
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 				  int node, unsigned long caller)
 {
 	struct kmem_cache *s;
+	void *ret;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large_node(size, gfpflags, node);
 
 	s = get_slab(size, gfpflags);
@@ -3247,7 +3308,13 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, gfpflags, node, caller);
+	ret = slab_alloc(s, gfpflags, node, caller);
+
+	/* Honor the call site pointer we received. */
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, caller, ret,
+				  size, s->size, gfpflags, node);
+
+	return ret;
 }
 
 #ifdef CONFIG_SLUB_DEBUG
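The mm/slub.c hunks also replace the open-coded PAGE_SIZE threshold and PAGE_SHIFT + 1 array bounds with SLUB_MAX_SIZE and SLUB_PAGE_SHIFT. Their definitions are not part of this diff (they would live in include/linux/slub_def.h); the sketch below is an assumption consistent with how the hunks use them, where kmalloc_caches[] needs one slot per power-of-two size up to SLUB_MAX_SIZE and anything larger falls through to kmalloc_large():

	/* Assumed definitions, for illustration -- not shown in this diff. */
	#define SLUB_MAX_SIZE	(2 * PAGE_SIZE)	 /* largest kmalloc served from kmalloc_caches[] */
	#define SLUB_PAGE_SHIFT	(PAGE_SHIFT + 2) /* kmalloc_caches[] covers orders below this */

With such values, changing the init loops from i <= PAGE_SHIFT to i < SLUB_PAGE_SHIFT adds one cache (for objects of 2 * PAGE_SIZE), matching the raised size > SLUB_MAX_SIZE cutoff in __kmalloc(), __kmalloc_node() and the *_track_caller variants.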
