Diffstat (limited to 'mm')
-rw-r--r--  mm/mlock.c            7
-rw-r--r--  mm/page-writeback.c  15
-rw-r--r--  mm/page_alloc.c      27
-rw-r--r--  mm/page_io.c          2
-rw-r--r--  mm/slab.c            72
-rw-r--r--  mm/slob.c            38
-rw-r--r--  mm/slub.c            84
-rw-r--r--  mm/vmalloc.c          8
8 files changed, 217 insertions, 36 deletions
diff --git a/mm/mlock.c b/mm/mlock.c
index 037161d61b4e..cbe9e0581b75 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -660,7 +660,7 @@ void *alloc_locked_buffer(size_t size)
 	return buffer;
 }
 
-void free_locked_buffer(void *buffer, size_t size)
+void release_locked_buffer(void *buffer, size_t size)
 {
 	unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
 
@@ -670,6 +670,11 @@ void free_locked_buffer(void *buffer, size_t size)
 	current->mm->locked_vm -= pgsz;
 
 	up_write(&current->mm->mmap_sem);
+}
+
+void free_locked_buffer(void *buffer, size_t size)
+{
+	release_locked_buffer(buffer, size);
 
 	kfree(buffer);
 }
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 6106a5c7ed44..74dc57c74349 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -240,7 +240,7 @@ void bdi_writeout_inc(struct backing_dev_info *bdi)
 }
 EXPORT_SYMBOL_GPL(bdi_writeout_inc);
 
-static inline void task_dirty_inc(struct task_struct *tsk)
+void task_dirty_inc(struct task_struct *tsk)
 {
 	prop_inc_single(&vm_dirties, &tsk->dirties);
 }
@@ -1079,7 +1079,7 @@ continue_unlock:
 		pagevec_release(&pvec);
 		cond_resched();
 	}
-	if (!cycled) {
+	if (!cycled && !done) {
 		/*
 		 * range_cyclic:
 		 * We hit the last page and there is more work to be done: wrap
@@ -1230,6 +1230,7 @@ int __set_page_dirty_nobuffers(struct page *page)
 			__inc_zone_page_state(page, NR_FILE_DIRTY);
 			__inc_bdi_stat(mapping->backing_dev_info,
 					BDI_RECLAIMABLE);
+			task_dirty_inc(current);
 			task_io_account_write(PAGE_CACHE_SIZE);
 		}
 		radix_tree_tag_set(&mapping->page_tree,
@@ -1262,7 +1263,7 @@ EXPORT_SYMBOL(redirty_page_for_writepage);
  * If the mapping doesn't provide a set_page_dirty a_op, then
  * just fall through and assume that it wants buffer_heads.
  */
-static int __set_page_dirty(struct page *page)
+int set_page_dirty(struct page *page)
 {
 	struct address_space *mapping = page_mapping(page);
 
@@ -1280,14 +1281,6 @@ static int __set_page_dirty(struct page *page)
 	}
 	return 0;
 }
-
-int set_page_dirty(struct page *page)
-{
-	int ret = __set_page_dirty(page);
-	if (ret)
-		task_dirty_inc(current);
-	return ret;
-}
 EXPORT_SYMBOL(set_page_dirty);
 
 /*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5675b3073854..5c44ed49ca93 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2989,7 +2989,7 @@ static int __meminit next_active_region_index_in_nid(int index, int nid)
  * was used and there are no special requirements, this is a convenient
  * alternative
  */
-int __meminit early_pfn_to_nid(unsigned long pfn)
+int __meminit __early_pfn_to_nid(unsigned long pfn)
 {
 	int i;
 
@@ -3000,10 +3000,33 @@ int __meminit early_pfn_to_nid(unsigned long pfn)
 		if (start_pfn <= pfn && pfn < end_pfn)
 			return early_node_map[i].nid;
 	}
+	/* This is a memory hole */
+	return -1;
+}
+#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
+
+int __meminit early_pfn_to_nid(unsigned long pfn)
+{
+	int nid;
 
+	nid = __early_pfn_to_nid(pfn);
+	if (nid >= 0)
+		return nid;
+	/* just returns 0 */
 	return 0;
 }
-#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
+
+#ifdef CONFIG_NODES_SPAN_OTHER_NODES
+bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
+{
+	int nid;
+
+	nid = __early_pfn_to_nid(pfn);
+	if (nid >= 0 && nid != node)
+		return false;
+	return true;
+}
+#endif
 
 /* Basic iterator support to walk early_node_map[] */
 #define for_each_active_range_index_in_nid(i, nid) \
diff --git a/mm/page_io.c b/mm/page_io.c
index dc6ce0afbded..3023c475e041 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -111,7 +111,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
 		goto out;
 	}
 	if (wbc->sync_mode == WB_SYNC_ALL)
-		rw |= (1 << BIO_RW_SYNC);
+		rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
 	count_vm_event(PSWPOUT);
 	set_page_writeback(page);
 	unlock_page(page);
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -102,6 +102,7 @@
 #include <linux/cpu.h>
 #include <linux/sysctl.h>
 #include <linux/module.h>
+#include <trace/kmemtrace.h>
 #include <linux/rcupdate.h>
 #include <linux/string.h>
 #include <linux/uaccess.h>
@@ -568,6 +569,14 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 
 #endif
 
+#ifdef CONFIG_KMEMTRACE
+size_t slab_buffer_size(struct kmem_cache *cachep)
+{
+	return cachep->buffer_size;
+}
+EXPORT_SYMBOL(slab_buffer_size);
+#endif
+
 /*
  * Do not go above this order unless 0 objects fit into the slab.
  */
@@ -3550,10 +3559,23 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
  */
 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
-	return __cache_alloc(cachep, flags, __builtin_return_address(0));
+	void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+			     obj_size(cachep), cachep->buffer_size, flags);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+{
+	return __cache_alloc(cachep, flags, __builtin_return_address(0));
+}
+EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+#endif
+
 /**
  * kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
  * @cachep: the cache we're checking against
@@ -3598,23 +3620,47 @@ out:
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
-	return __cache_alloc_node(cachep, flags, nodeid,
+	void *ret = __cache_alloc_node(cachep, flags, nodeid,
 				  __builtin_return_address(0));
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+				  obj_size(cachep), cachep->buffer_size,
+				  flags, nodeid);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+				    gfp_t flags,
+				    int nodeid)
+{
+	return __cache_alloc_node(cachep, flags, nodeid,
+				  __builtin_return_address(0));
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+#endif
+
 static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 {
 	struct kmem_cache *cachep;
+	void *ret;
 
 	cachep = kmem_find_general_cachep(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	return kmem_cache_alloc_node(cachep, flags, node);
+	ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+				  (unsigned long) caller, ret,
+				  size, cachep->buffer_size, flags, node);
+
+	return ret;
 }
 
-#ifdef CONFIG_DEBUG_SLAB
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	return __do_kmalloc_node(size, flags, node,
@@ -3647,6 +3693,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 					  void *caller)
 {
 	struct kmem_cache *cachep;
+	void *ret;
 
 	/* If you want to save a few bytes .text space: replace
 	 * __ with kmem_.
@@ -3656,11 +3703,17 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 	cachep = __find_general_cachep(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	return __cache_alloc(cachep, flags, caller);
+	ret = __cache_alloc(cachep, flags, caller);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
+			     (unsigned long) caller, ret,
+			     size, cachep->buffer_size, flags);
+
+	return ret;
 }
 
 
-#ifdef CONFIG_DEBUG_SLAB
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	return __do_kmalloc(size, flags, __builtin_return_address(0));
@@ -3699,6 +3752,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 	debug_check_no_obj_freed(objp, obj_size(cachep));
 	__cache_free(cachep, objp);
 	local_irq_restore(flags);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, objp);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -3725,6 +3780,8 @@ void kfree(const void *objp)
 	debug_check_no_obj_freed(objp, obj_size(c));
 	__cache_free(c, (void *)objp);
 	local_irq_restore(flags);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, objp);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -4457,3 +4514,4 @@ size_t ksize(const void *objp)
 
 	return obj_size(virt_to_cache(objp));
 }
+EXPORT_SYMBOL(ksize);
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -65,6 +65,7 @@
 #include <linux/module.h>
 #include <linux/rcupdate.h>
 #include <linux/list.h>
+#include <trace/kmemtrace.h>
 #include <asm/atomic.h>
 
 /*
@@ -463,27 +464,38 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 {
 	unsigned int *m;
 	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+	void *ret;
 
 	if (size < PAGE_SIZE - align) {
 		if (!size)
 			return ZERO_SIZE_PTR;
 
 		m = slob_alloc(size + align, gfp, align, node);
+
 		if (!m)
 			return NULL;
 		*m = size;
-		return (void *)m + align;
+		ret = (void *)m + align;
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _RET_IP_, ret,
+					  size, size + align, gfp, node);
 	} else {
-		void *ret;
+		unsigned int order = get_order(size);
 
-		ret = slob_new_page(gfp | __GFP_COMP, get_order(size), node);
+		ret = slob_new_page(gfp | __GFP_COMP, order, node);
 		if (ret) {
 			struct page *page;
 			page = virt_to_page(ret);
 			page->private = size;
 		}
-		return ret;
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _RET_IP_, ret,
+					  size, PAGE_SIZE << order, gfp, node);
 	}
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 
@@ -501,6 +513,8 @@ void kfree(const void *block)
 		slob_free(m, *m + align);
 	} else
 		put_page(&sp->page);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, block);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -521,6 +535,7 @@ size_t ksize(const void *block)
 	} else
 		return sp->page.private;
 }
+EXPORT_SYMBOL(ksize);
 
 struct kmem_cache {
 	unsigned int size, align;
@@ -569,10 +584,19 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
 	void *b;
 
-	if (c->size < PAGE_SIZE)
+	if (c->size < PAGE_SIZE) {
 		b = slob_alloc(c->size, flags, c->align, node);
-	else
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
+					  _RET_IP_, b, c->size,
+					  SLOB_UNITS(c->size) * SLOB_UNIT,
+					  flags, node);
+	} else {
 		b = slob_new_page(flags, get_order(c->size), node);
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
+					  _RET_IP_, b, c->size,
+					  PAGE_SIZE << get_order(c->size),
+					  flags, node);
+	}
 
 	if (c->ctor)
 		c->ctor(b);
@@ -608,6 +632,8 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
 	} else {
 		__kmem_cache_free(b, c->size);
 	}
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, b);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -16,6 +16,7 @@
 #include <linux/slab.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <trace/kmemtrace.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
 #include <linux/mempolicy.h>
@@ -1623,18 +1624,46 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return slab_alloc(s, gfpflags, -1, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+			     s->objsize, s->size, gfpflags);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+{
+	return slab_alloc(s, gfpflags, -1, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+#endif
+
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	return slab_alloc(s, gfpflags, node, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+				  s->objsize, s->size, gfpflags, node);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+				    gfp_t gfpflags,
+				    int node)
+{
+	return slab_alloc(s, gfpflags, node, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+#endif
+
 /*
  * Slow patch handling. This may still be called frequently since objects
  * have a longer lifetime than the cpu slabs in most processing loads.
@@ -1742,6 +1771,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 	page = virt_to_head_page(x);
 
 	slab_free(s, page, x, _RET_IP_);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -2657,6 +2688,7 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s;
+	void *ret;
 
 	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, flags);
@@ -2666,7 +2698,12 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, -1, _RET_IP_);
+	ret = slab_alloc(s, flags, -1, _RET_IP_);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+			     size, s->size, flags);
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2685,16 +2722,30 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *s;
+	void *ret;
 
-	if (unlikely(size > SLUB_MAX_SIZE))
-		return kmalloc_large_node(size, flags, node);
+	if (unlikely(size > SLUB_MAX_SIZE)) {
+		ret = kmalloc_large_node(size, flags, node);
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _RET_IP_, ret,
+					  size, PAGE_SIZE << get_order(size),
+					  flags, node);
+
+		return ret;
+	}
 
 	s = get_slab(size, flags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, node, _RET_IP_);
+	ret = slab_alloc(s, flags, node, _RET_IP_);
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+				  size, s->size, flags, node);
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2736,6 +2787,7 @@ size_t ksize(const void *object)
 	 */
 	return s->size;
 }
+EXPORT_SYMBOL(ksize);
 
 void kfree(const void *x)
 {
@@ -2752,6 +2804,8 @@ void kfree(const void *x)
 		return;
 	}
 	slab_free(page->slab, page, object, _RET_IP_);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -3221,6 +3275,7 @@ static struct notifier_block __cpuinitdata slab_notifier = {
 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
 	struct kmem_cache *s;
+	void *ret;
 
 	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, gfpflags);
@@ -3230,13 +3285,20 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, gfpflags, -1, caller);
+	ret = slab_alloc(s, gfpflags, -1, caller);
+
+	/* Honor the call site pointer we recieved. */
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, caller, ret, size,
+			     s->size, gfpflags);
+
+	return ret;
 }
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 					int node, unsigned long caller)
 {
 	struct kmem_cache *s;
+	void *ret;
 
 	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large_node(size, gfpflags, node);
@@ -3246,7 +3308,13 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, gfpflags, node, caller);
+	ret = slab_alloc(s, gfpflags, node, caller);
+
+	/* Honor the call site pointer we recieved. */
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, caller, ret,
+				  size, s->size, gfpflags, node);
+
+	return ret;
 }
 
 #ifdef CONFIG_SLUB_DEBUG
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 75f49d312e8c..4dd2636d0b92 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1106,6 +1106,14 @@ struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 }
 EXPORT_SYMBOL_GPL(__get_vm_area);
 
+struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
+				       unsigned long start, unsigned long end,
+				       void *caller)
+{
+	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
+				  caller);
+}
+
 /**
  * get_vm_area - reserve a contiguous kernel virtual area
  * @size: size of the area