Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c |  8
-rw-r--r--  mm/quicklist.c  |  2
-rw-r--r--  mm/slab.c       | 73
-rw-r--r--  mm/slob.c       | 35
-rw-r--r--  mm/slub.c       | 83
-rw-r--r--  mm/vmscan.c     |  6
6 files changed, 179 insertions(+), 28 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0284e528748d..e2f26991fff1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -331,7 +331,7 @@ static int destroy_compound_page(struct page *page, unsigned long order)
         for (i = 1; i < nr_pages; i++) {
                 struct page *p = page + i;
 
-                if (unlikely(!PageTail(p) | (p->first_page != page))) {
+                if (unlikely(!PageTail(p) || (p->first_page != page))) {
                         bad_page(page);
                         bad++;
                 }
@@ -2128,7 +2128,7 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
         int n, val;
         int min_val = INT_MAX;
         int best_node = -1;
-        node_to_cpumask_ptr(tmp, 0);
+        const struct cpumask *tmp = cpumask_of_node(0);
 
         /* Use the local node if we haven't already */
         if (!node_isset(node, *used_node_mask)) {
@@ -2149,8 +2149,8 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
                 val += (n < node);
 
                 /* Give preference to headless and unused nodes */
-                node_to_cpumask_ptr_next(tmp, n);
-                if (!cpus_empty(*tmp))
+                tmp = cpumask_of_node(n);
+                if (!cpumask_empty(tmp))
                         val += PENALTY_FOR_NODE_WITH_CPUS;
 
                 /* Slight preference for less loaded node */
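The first page_alloc.c hunk is a standalone fix: a bitwise | that should have been the logical || in the tail-page check. The remaining cpumask changes in this diff (here and in mm/quicklist.c, mm/slab.c and mm/vmscan.c below) all make the same conversion: the old node_to_cpumask_ptr()/node_to_cpumask_ptr_next() macros, which on some configurations expand to a full cpumask copy on the stack, are replaced by cpumask_of_node(), which returns a const pointer into the topology map, together with the pointer-based helpers cpumask_empty() and cpumask_any_and(). A minimal sketch of the new idiom, illustrative only and not part of this patch (it assumes the <linux/topology.h> API of this kernel generation):

#include <linux/cpumask.h>
#include <linux/topology.h>

/* Does @node have any online CPU?  No cpumask is copied onto the stack. */
static bool node_has_online_cpu(int node)
{
        const struct cpumask *mask = cpumask_of_node(node);

        return cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids;
}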
diff --git a/mm/quicklist.c b/mm/quicklist.c
index 8dbb6805ef35..e66d07d1b4ff 100644
--- a/mm/quicklist.c
+++ b/mm/quicklist.c
@@ -29,7 +29,7 @@ static unsigned long max_pages(unsigned long min_pages)
         int node = numa_node_id();
         struct zone *zones = NODE_DATA(node)->node_zones;
         int num_cpus_on_node;
-        node_to_cpumask_ptr(cpumask_on_node, node);
+        const struct cpumask *cpumask_on_node = cpumask_of_node(node);
 
         node_free_pages =
 #ifdef CONFIG_ZONE_DMA
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -102,6 +102,7 @@
 #include <linux/cpu.h>
 #include <linux/sysctl.h>
 #include <linux/module.h>
+#include <trace/kmemtrace.h>
 #include <linux/rcupdate.h>
 #include <linux/string.h>
 #include <linux/uaccess.h>
@@ -568,6 +569,14 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 
 #endif
 
+#ifdef CONFIG_KMEMTRACE
+size_t slab_buffer_size(struct kmem_cache *cachep)
+{
+        return cachep->buffer_size;
+}
+EXPORT_SYMBOL(slab_buffer_size);
+#endif
+
 /*
  * Do not go above this order unless 0 objects fit into the slab.
  */
@@ -1160,7 +1169,7 @@ static void __cpuinit cpuup_canceled(long cpu)
         struct kmem_cache *cachep;
         struct kmem_list3 *l3 = NULL;
         int node = cpu_to_node(cpu);
-        node_to_cpumask_ptr(mask, node);
+        const struct cpumask *mask = cpumask_of_node(node);
 
         list_for_each_entry(cachep, &cache_chain, next) {
                 struct array_cache *nc;
@@ -3554,10 +3563,23 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
  */
 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
-        return __cache_alloc(cachep, flags, __builtin_return_address(0));
+        void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+
+        kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+                             obj_size(cachep), cachep->buffer_size, flags);
+
+        return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+{
+        return __cache_alloc(cachep, flags, __builtin_return_address(0));
+}
+EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+#endif
+
 /**
  * kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
  * @cachep: the cache we're checking against
@@ -3602,23 +3624,47 @@ out:
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
-        return __cache_alloc_node(cachep, flags, nodeid,
-                                  __builtin_return_address(0));
+        void *ret = __cache_alloc_node(cachep, flags, nodeid,
+                                       __builtin_return_address(0));
+
+        kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+                                  obj_size(cachep), cachep->buffer_size,
+                                  flags, nodeid);
+
+        return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+                                    gfp_t flags,
+                                    int nodeid)
+{
+        return __cache_alloc_node(cachep, flags, nodeid,
+                                  __builtin_return_address(0));
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+#endif
+
 static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 {
         struct kmem_cache *cachep;
+        void *ret;
 
         cachep = kmem_find_general_cachep(size, flags);
         if (unlikely(ZERO_OR_NULL_PTR(cachep)))
                 return cachep;
-        return kmem_cache_alloc_node(cachep, flags, node);
+        ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
+
+        kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+                                  (unsigned long) caller, ret,
+                                  size, cachep->buffer_size, flags, node);
+
+        return ret;
 }
 
-#ifdef CONFIG_DEBUG_SLAB
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
         return __do_kmalloc_node(size, flags, node,
@@ -3651,6 +3697,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
                                           void *caller)
 {
         struct kmem_cache *cachep;
+        void *ret;
 
         /* If you want to save a few bytes .text space: replace
          * __ with kmem_.
@@ -3660,11 +3707,17 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
         cachep = __find_general_cachep(size, flags);
         if (unlikely(ZERO_OR_NULL_PTR(cachep)))
                 return cachep;
-        return __cache_alloc(cachep, flags, caller);
+        ret = __cache_alloc(cachep, flags, caller);
+
+        kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
+                             (unsigned long) caller, ret,
+                             size, cachep->buffer_size, flags);
+
+        return ret;
 }
 
 
-#ifdef CONFIG_DEBUG_SLAB
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
 void *__kmalloc(size_t size, gfp_t flags)
 {
         return __do_kmalloc(size, flags, __builtin_return_address(0));
@@ -3703,6 +3756,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
         debug_check_no_obj_freed(objp, obj_size(cachep));
         __cache_free(cachep, objp);
         local_irq_restore(flags);
+
+        kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, objp);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -3729,6 +3784,8 @@ void kfree(const void *objp)
         debug_check_no_obj_freed(objp, obj_size(c));
         __cache_free(c, (void *)objp);
         local_irq_restore(flags);
+
+        kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, objp);
 }
 EXPORT_SYMBOL(kfree);
 
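The slab.c hooks follow a pattern that repeats in slub.c below: every public allocation entry point gains a kmemtrace_mark_alloc*() call, a _notrace twin is exported under CONFIG_KMEMTRACE for callers that emit their own, more specific event, and __do_kmalloc_node() switches to the _notrace variant so a kmalloc-style allocation is reported once, attributed to the original caller, rather than twice. A self-contained user-space analogue of that split (illustrative only, plain C, not kernel code):

#include <stdio.h>
#include <stdlib.h>

static void *cache_alloc_notrace(size_t size)
{
        return malloc(size);            /* no trace event emitted here */
}

static void *cache_alloc(size_t size)
{
        void *ret = cache_alloc_notrace(size);

        printf("cache alloc: site=%p ptr=%p bytes=%zu\n",
               __builtin_return_address(0), ret, size);
        return ret;
}

static void *my_kmalloc(size_t size)
{
        /* use the notrace variant, then report the kmalloc call site */
        void *ret = cache_alloc_notrace(size);

        printf("kmalloc:     site=%p ptr=%p bytes=%zu\n",
               __builtin_return_address(0), ret, size);
        return ret;
}

int main(void)
{
        void *a = cache_alloc(32);
        void *b = my_kmalloc(64);

        free(a);
        free(b);
        return 0;
}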
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -65,6 +65,7 @@
 #include <linux/module.h>
 #include <linux/rcupdate.h>
 #include <linux/list.h>
+#include <trace/kmemtrace.h>
 #include <asm/atomic.h>
 
 /*
@@ -474,6 +475,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 {
         unsigned int *m;
         int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+        void *ret;
 
         lockdep_trace_alloc(gfp);
 
@@ -482,12 +484,17 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
                         return ZERO_SIZE_PTR;
 
                 m = slob_alloc(size + align, gfp, align, node);
+
                 if (!m)
                         return NULL;
                 *m = size;
-                return (void *)m + align;
+                ret = (void *)m + align;
+
+                kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+                                          _RET_IP_, ret,
+                                          size, size + align, gfp, node);
         } else {
-                void *ret;
+                unsigned int order = get_order(size);
 
                 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
                 if (ret) {
@@ -495,8 +502,13 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
                         page = virt_to_page(ret);
                         page->private = size;
                 }
-                return ret;
+
+                kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+                                          _RET_IP_, ret,
+                                          size, PAGE_SIZE << order, gfp, node);
         }
+
+        return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 
@@ -514,6 +526,8 @@ void kfree(const void *block)
                 slob_free(m, *m + align);
         } else
                 put_page(&sp->page);
+
+        kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, block);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -583,10 +597,19 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
         void *b;
 
-        if (c->size < PAGE_SIZE)
+        if (c->size < PAGE_SIZE) {
                 b = slob_alloc(c->size, flags, c->align, node);
-        else
+                kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
+                                          _RET_IP_, b, c->size,
+                                          SLOB_UNITS(c->size) * SLOB_UNIT,
+                                          flags, node);
+        } else {
                 b = slob_new_pages(flags, get_order(c->size), node);
+                kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
+                                          _RET_IP_, b, c->size,
+                                          PAGE_SIZE << get_order(c->size),
+                                          flags, node);
+        }
 
         if (c->ctor)
                 c->ctor(b);
@@ -622,6 +645,8 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
         } else {
                 __kmem_cache_free(b, c->size);
         }
+
+        kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, b);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
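In the SLOB hunks, each kmemtrace_mark_alloc_node() call reports both the requested size and the bytes actually consumed: size + align for a small kmalloc, SLOB_UNITS(c->size) * SLOB_UNIT for a small kmem_cache object, and PAGE_SIZE << order for page-backed allocations. A stand-alone sketch of that arithmetic (illustrative only, not kernel code; SLOB_UNIT is assumed to be 4 bytes here, whereas the kernel derives it from sizeof(slob_t), which varies by architecture):

#include <stdio.h>

#define SLOB_UNIT               4UL
#define SLOB_UNITS(size)        (((size) + SLOB_UNIT - 1) / SLOB_UNIT)
#define PAGE_SIZE               4096UL

static unsigned int get_order(unsigned long size)
{
        unsigned int order = 0;

        while ((PAGE_SIZE << order) < size)
                order++;
        return order;
}

int main(void)
{
        unsigned long req = 30; /* bytes the caller asked for */

        /* small kmem_cache object: rounded up to whole SLOB units */
        printf("cache object: req=%lu alloc=%lu\n",
               req, SLOB_UNITS(req) * SLOB_UNIT);

        /* large allocation: whole pages, PAGE_SIZE << order */
        req = 10000;
        printf("page-backed:  req=%lu alloc=%lu\n",
               req, PAGE_SIZE << get_order(req));
        return 0;
}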
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -16,6 +16,7 @@
 #include <linux/slab.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <trace/kmemtrace.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
 #include <linux/mempolicy.h>
@@ -1618,18 +1619,46 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-        return slab_alloc(s, gfpflags, -1, _RET_IP_);
+        void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
+
+        kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+                             s->objsize, s->size, gfpflags);
+
+        return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+{
+        return slab_alloc(s, gfpflags, -1, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+#endif
+
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-        return slab_alloc(s, gfpflags, node, _RET_IP_);
+        void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+
+        kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+                                  s->objsize, s->size, gfpflags, node);
+
+        return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+                                    gfp_t gfpflags,
+                                    int node)
+{
+        return slab_alloc(s, gfpflags, node, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+#endif
+
 /*
  * Slow patch handling. This may still be called frequently since objects
  * have a longer lifetime than the cpu slabs in most processing loads.
@@ -1737,6 +1766,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
         page = virt_to_head_page(x);
 
         slab_free(s, page, x, _RET_IP_);
+
+        kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -2659,6 +2690,7 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 void *__kmalloc(size_t size, gfp_t flags)
 {
         struct kmem_cache *s;
+        void *ret;
 
         if (unlikely(size > SLUB_MAX_SIZE))
                 return kmalloc_large(size, flags);
@@ -2668,7 +2700,12 @@ void *__kmalloc(size_t size, gfp_t flags)
         if (unlikely(ZERO_OR_NULL_PTR(s)))
                 return s;
 
-        return slab_alloc(s, flags, -1, _RET_IP_);
+        ret = slab_alloc(s, flags, -1, _RET_IP_);
+
+        kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+                             size, s->size, flags);
+
+        return ret;
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2687,16 +2724,30 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
         struct kmem_cache *s;
+        void *ret;
 
-        if (unlikely(size > SLUB_MAX_SIZE))
-                return kmalloc_large_node(size, flags, node);
+        if (unlikely(size > SLUB_MAX_SIZE)) {
+                ret = kmalloc_large_node(size, flags, node);
+
+                kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+                                          _RET_IP_, ret,
+                                          size, PAGE_SIZE << get_order(size),
+                                          flags, node);
+
+                return ret;
+        }
 
         s = get_slab(size, flags);
 
         if (unlikely(ZERO_OR_NULL_PTR(s)))
                 return s;
 
-        return slab_alloc(s, flags, node, _RET_IP_);
+        ret = slab_alloc(s, flags, node, _RET_IP_);
+
+        kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+                                  size, s->size, flags, node);
+
+        return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2755,6 +2806,8 @@ void kfree(const void *x)
                 return;
         }
         slab_free(page->slab, page, object, _RET_IP_);
+
+        kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -3224,6 +3277,7 @@ static struct notifier_block __cpuinitdata slab_notifier = {
 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
         struct kmem_cache *s;
+        void *ret;
 
         if (unlikely(size > SLUB_MAX_SIZE))
                 return kmalloc_large(size, gfpflags);
@@ -3233,13 +3287,20 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
         if (unlikely(ZERO_OR_NULL_PTR(s)))
                 return s;
 
-        return slab_alloc(s, gfpflags, -1, caller);
+        ret = slab_alloc(s, gfpflags, -1, caller);
+
+        /* Honor the call site pointer we recieved. */
+        kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, caller, ret, size,
+                             s->size, gfpflags);
+
+        return ret;
 }
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
                                   int node, unsigned long caller)
 {
         struct kmem_cache *s;
+        void *ret;
 
         if (unlikely(size > SLUB_MAX_SIZE))
                 return kmalloc_large_node(size, gfpflags, node);
@@ -3249,7 +3310,13 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
         if (unlikely(ZERO_OR_NULL_PTR(s)))
                 return s;
 
-        return slab_alloc(s, gfpflags, node, caller);
+        ret = slab_alloc(s, gfpflags, node, caller);
+
+        /* Honor the call site pointer we recieved. */
+        kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, caller, ret,
+                                  size, s->size, gfpflags, node);
+
+        return ret;
 }
 
 #ifdef CONFIG_SLUB_DEBUG
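The two *_track_caller hunks differ from the earlier ones in that they pass the caller argument they received to kmemtrace rather than _RET_IP_. _RET_IP_ is the return address of the current function, so for helpers that are themselves wrappers (users of kmalloc_track_caller(), such as krealloc()) it would point at the wrapper instead of the code that actually requested the memory. A user-space sketch of the same idea (illustrative only, not kernel code; in the kernel, _RET_IP_ is (unsigned long)__builtin_return_address(0)):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define RET_IP ((unsigned long)__builtin_return_address(0))

static void *alloc_track_caller(size_t size, unsigned long caller)
{
        void *ret = malloc(size);

        /* honour the call site pointer we received, as the hunks above do */
        printf("alloc: site=0x%lx bytes=%zu\n", caller, size);
        return ret;
}

/* A wrapper in the spirit of krealloc(): forwards its own caller's address. */
static void *my_resize(void *p, size_t old_size, size_t new_size)
{
        void *ret = alloc_track_caller(new_size, RET_IP);

        if (ret && p)
                memcpy(ret, p, old_size < new_size ? old_size : new_size);
        free(p);
        return ret;
}

int main(void)
{
        char *buf = malloc(16);

        buf = my_resize(buf, 16, 128);
        free(buf);
        return 0;
}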
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 425244988bb2..39fdfb14eeaa 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1967,7 +1967,7 @@ static int kswapd(void *p)
         struct reclaim_state reclaim_state = {
                 .reclaimed_slab = 0,
         };
-        node_to_cpumask_ptr(cpumask, pgdat->node_id);
+        const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
 
         lockdep_set_current_reclaim_state(GFP_KERNEL);
 
@@ -2204,7 +2204,9 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
         if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
                 for_each_node_state(nid, N_HIGH_MEMORY) {
                         pg_data_t *pgdat = NODE_DATA(nid);
-                        node_to_cpumask_ptr(mask, pgdat->node_id);
+                        const struct cpumask *mask;
+
+                        mask = cpumask_of_node(pgdat->node_id);
 
                         if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
                                 /* One of our CPUs online: restore mask */