Diffstat (limited to 'mm')
-rw-r--r--  mm/allocpercpu.c   2
-rw-r--r--  mm/backing-dev.c  10
-rw-r--r--  mm/failslab.c      1
-rw-r--r--  mm/filemap.c       2
-rw-r--r--  mm/page_alloc.c    6
-rw-r--r--  mm/quicklist.c     2
-rw-r--r--  mm/slab.c         71
-rw-r--r--  mm/slob.c         31
-rw-r--r--  mm/slub.c         77
-rw-r--r--  mm/util.c         16
-rw-r--r--  mm/vmscan.c        6
11 files changed, 191 insertions, 33 deletions
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
index 139d5b7b6621..dfdee6a47359 100644
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -31,7 +31,7 @@ static void percpu_depopulate(void *__pdata, int cpu)
  * @__pdata: per-cpu data to depopulate
  * @mask: depopulate per-cpu data for cpu's selected through mask bits
  */
-static void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask)
+static void __percpu_depopulate_mask(void *__pdata, const cpumask_t *mask)
 {
 	int cpu;
 	for_each_cpu_mask_nr(cpu, *mask)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index be68c956a660..493b468a5035 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -284,12 +284,12 @@ static wait_queue_head_t congestion_wqh[2] = {
 };
 
 
-void clear_bdi_congested(struct backing_dev_info *bdi, int rw)
+void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
 {
 	enum bdi_state bit;
-	wait_queue_head_t *wqh = &congestion_wqh[rw];
+	wait_queue_head_t *wqh = &congestion_wqh[sync];
 
-	bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
+	bit = sync ? BDI_sync_congested : BDI_async_congested;
 	clear_bit(bit, &bdi->state);
 	smp_mb__after_clear_bit();
 	if (waitqueue_active(wqh))
@@ -297,11 +297,11 @@ void clear_bdi_congested(struct backing_dev_info *bdi, int rw)
 }
 EXPORT_SYMBOL(clear_bdi_congested);
 
-void set_bdi_congested(struct backing_dev_info *bdi, int rw)
+void set_bdi_congested(struct backing_dev_info *bdi, int sync)
 {
 	enum bdi_state bit;
 
-	bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
+	bit = sync ? BDI_sync_congested : BDI_async_congested;
 	set_bit(bit, &bdi->state);
 }
 EXPORT_SYMBOL(set_bdi_congested);
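
With this change congestion is tracked per sync/async direction rather than per read/write, so callers now pass a sync flag. As a minimal illustrative sketch (not part of the diff; it assumes a struct request_queue with an embedded backing_dev_info, as in this kernel series), a block-layer user of the new signature might look like:

#include <linux/blkdev.h>
#include <linux/backing-dev.h>

/* Illustrative only: flag or clear congestion for synchronous requests
 * on a queue's backing device using the new sync-based API. */
static void example_set_sync_congestion(struct request_queue *q, bool congested)
{
	if (congested)
		set_bdi_congested(&q->backing_dev_info, 1 /* sync */);
	else
		clear_bdi_congested(&q->backing_dev_info, 1 /* sync */);
}
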
diff --git a/mm/failslab.c b/mm/failslab.c
index 7c6ea6493f80..9339de5f0a91 100644
--- a/mm/failslab.c
+++ b/mm/failslab.c
@@ -1,4 +1,5 @@
 #include <linux/fault-inject.h>
+#include <linux/gfp.h>
 
 static struct {
 	struct fault_attr attr;
diff --git a/mm/filemap.c b/mm/filemap.c
index fc11974f2bee..2e2d38ebda4b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -513,6 +513,7 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 	}
 	return ret;
 }
+EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
 
 #ifdef CONFIG_NUMA
 struct page *__page_cache_alloc(gfp_t gfp)
@@ -645,6 +646,7 @@ int __lock_page_killable(struct page *page)
 	return __wait_on_bit_lock(page_waitqueue(page), &wait,
 					sync_page_killable, TASK_KILLABLE);
 }
+EXPORT_SYMBOL_GPL(__lock_page_killable);
 
 /**
  * __lock_page_nosync - get a lock on the page, without calling sync_page()
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3f30189896fd..e2f26991fff1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2128,7 +2128,7 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
 	int n, val;
 	int min_val = INT_MAX;
 	int best_node = -1;
-	node_to_cpumask_ptr(tmp, 0);
+	const struct cpumask *tmp = cpumask_of_node(0);
 
 	/* Use the local node if we haven't already */
 	if (!node_isset(node, *used_node_mask)) {
@@ -2149,8 +2149,8 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
 		val += (n < node);
 
 		/* Give preference to headless and unused nodes */
-		node_to_cpumask_ptr_next(tmp, n);
-		if (!cpus_empty(*tmp))
+		tmp = cpumask_of_node(n);
+		if (!cpumask_empty(tmp))
 			val += PENALTY_FOR_NODE_WITH_CPUS;
 
 		/* Slight preference for less loaded node */
diff --git a/mm/quicklist.c b/mm/quicklist.c
index 8dbb6805ef35..e66d07d1b4ff 100644
--- a/mm/quicklist.c
+++ b/mm/quicklist.c
@@ -29,7 +29,7 @@ static unsigned long max_pages(unsigned long min_pages)
 	int node = numa_node_id();
 	struct zone *zones = NODE_DATA(node)->node_zones;
 	int num_cpus_on_node;
-	node_to_cpumask_ptr(cpumask_on_node, node);
+	const struct cpumask *cpumask_on_node = cpumask_of_node(node);
 
 	node_free_pages =
 #ifdef CONFIG_ZONE_DMA
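
The node_to_cpumask_ptr() conversions here and in mm/page_alloc.c above (and in mm/slab.c and mm/vmscan.c below) all follow the same pattern: instead of a macro that may materialise a cpumask_t on the stack, callers take a const struct cpumask * from cpumask_of_node() and use the pointer-based cpumask_* helpers. A minimal sketch of the new idiom (illustrative, not taken from the diff):

#include <linux/cpumask.h>
#include <linux/topology.h>

/* Illustrative only: query the CPUs attached to a NUMA node through the
 * pointer-returning API that this series converts mm/ callers to. */
static int example_node_has_cpus(int nid)
{
	const struct cpumask *mask = cpumask_of_node(nid);

	return !cpumask_empty(mask);
}
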
diff --git a/mm/slab.c b/mm/slab.c
index 208323fd37bc..9a90b00d2f91 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -102,6 +102,7 @@
 #include <linux/cpu.h>
 #include <linux/sysctl.h>
 #include <linux/module.h>
+#include <trace/kmemtrace.h>
 #include <linux/rcupdate.h>
 #include <linux/string.h>
 #include <linux/uaccess.h>
@@ -568,6 +569,14 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 
 #endif
 
+#ifdef CONFIG_KMEMTRACE
+size_t slab_buffer_size(struct kmem_cache *cachep)
+{
+	return cachep->buffer_size;
+}
+EXPORT_SYMBOL(slab_buffer_size);
+#endif
+
 /*
  * Do not go above this order unless 0 objects fit into the slab.
  */
@@ -1160,7 +1169,7 @@ static void __cpuinit cpuup_canceled(long cpu)
 	struct kmem_cache *cachep;
 	struct kmem_list3 *l3 = NULL;
 	int node = cpu_to_node(cpu);
-	node_to_cpumask_ptr(mask, node);
+	const struct cpumask *mask = cpumask_of_node(node);
 
 	list_for_each_entry(cachep, &cache_chain, next) {
 		struct array_cache *nc;
@@ -3554,10 +3563,23 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
  */
 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
-	return __cache_alloc(cachep, flags, __builtin_return_address(0));
+	void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+
+	trace_kmem_cache_alloc(_RET_IP_, ret,
+			       obj_size(cachep), cachep->buffer_size, flags);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+{
+	return __cache_alloc(cachep, flags, __builtin_return_address(0));
+}
+EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+#endif
+
 /**
  * kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
  * @cachep: the cache we're checking against
@@ -3602,23 +3624,46 @@ out:
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
-	return __cache_alloc_node(cachep, flags, nodeid,
-				  __builtin_return_address(0));
+	void *ret = __cache_alloc_node(cachep, flags, nodeid,
+				       __builtin_return_address(0));
+
+	trace_kmem_cache_alloc_node(_RET_IP_, ret,
+				    obj_size(cachep), cachep->buffer_size,
+				    flags, nodeid);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+				    gfp_t flags,
+				    int nodeid)
+{
+	return __cache_alloc_node(cachep, flags, nodeid,
+				  __builtin_return_address(0));
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+#endif
+
 static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 {
 	struct kmem_cache *cachep;
+	void *ret;
 
 	cachep = kmem_find_general_cachep(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	return kmem_cache_alloc_node(cachep, flags, node);
+	ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
+
+	trace_kmalloc_node((unsigned long) caller, ret,
+			   size, cachep->buffer_size, flags, node);
+
+	return ret;
 }
 
-#ifdef CONFIG_DEBUG_SLAB
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	return __do_kmalloc_node(size, flags, node,
@@ -3651,6 +3696,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 					  void *caller)
 {
 	struct kmem_cache *cachep;
+	void *ret;
 
 	/* If you want to save a few bytes .text space: replace
 	 * __ with kmem_.
@@ -3660,11 +3706,16 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 	cachep = __find_general_cachep(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	return __cache_alloc(cachep, flags, caller);
+	ret = __cache_alloc(cachep, flags, caller);
+
+	trace_kmalloc((unsigned long) caller, ret,
+		      size, cachep->buffer_size, flags);
+
+	return ret;
 }
 
 
-#ifdef CONFIG_DEBUG_SLAB
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	return __do_kmalloc(size, flags, __builtin_return_address(0));
@@ -3703,6 +3754,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 	debug_check_no_obj_freed(objp, obj_size(cachep));
 	__cache_free(cachep, objp);
 	local_irq_restore(flags);
+
+	trace_kmem_cache_free(_RET_IP_, objp);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -3720,6 +3773,8 @@ void kfree(const void *objp)
 	struct kmem_cache *c;
 	unsigned long flags;
 
+	trace_kfree(_RET_IP_, objp);
+
 	if (unlikely(ZERO_OR_NULL_PTR(objp)))
 		return;
 	local_irq_save(flags);
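
The _notrace entry points and slab_buffer_size() exist so that inline fast paths in headers can allocate without emitting a duplicate event and then issue the tracepoint themselves with the size the caller actually requested. A simplified, hedged sketch of how an inline kmalloc()-style wrapper (as in include/linux/slab_def.h) is expected to pair with them; the helper name and the omitted constant-size cache lookup are illustrative:

#ifdef CONFIG_KMEMTRACE
/* Illustrative sketch: allocate through the untraced entry point, then
 * report the requested size together with the cache's real object size. */
static __always_inline void *example_kmalloc_traced(struct kmem_cache *cachep,
						    size_t size, gfp_t flags)
{
	void *ret = kmem_cache_alloc_notrace(cachep, flags);

	trace_kmalloc(_THIS_IP_, ret, size, slab_buffer_size(cachep), flags);
	return ret;
}
#endif
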
diff --git a/mm/slob.c b/mm/slob.c
index 7a3411524dac..a2d4ab32198d 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -65,6 +65,7 @@
 #include <linux/module.h>
 #include <linux/rcupdate.h>
 #include <linux/list.h>
+#include <trace/kmemtrace.h>
 #include <asm/atomic.h>
 
 /*
@@ -474,6 +475,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 {
 	unsigned int *m;
 	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+	void *ret;
 
 	lockdep_trace_alloc(gfp);
 
@@ -482,12 +484,16 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 			return ZERO_SIZE_PTR;
 
 		m = slob_alloc(size + align, gfp, align, node);
+
 		if (!m)
 			return NULL;
 		*m = size;
-		return (void *)m + align;
+		ret = (void *)m + align;
+
+		trace_kmalloc_node(_RET_IP_, ret,
+				   size, size + align, gfp, node);
 	} else {
-		void *ret;
+		unsigned int order = get_order(size);
 
 		ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
 		if (ret) {
@@ -495,8 +501,12 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 			page = virt_to_page(ret);
 			page->private = size;
 		}
-		return ret;
+
+		trace_kmalloc_node(_RET_IP_, ret,
+				   size, PAGE_SIZE << order, gfp, node);
 	}
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 
@@ -504,6 +514,8 @@ void kfree(const void *block)
 {
 	struct slob_page *sp;
 
+	trace_kfree(_RET_IP_, block);
+
 	if (unlikely(ZERO_OR_NULL_PTR(block)))
 		return;
 
@@ -583,10 +595,17 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
 	void *b;
 
-	if (c->size < PAGE_SIZE)
+	if (c->size < PAGE_SIZE) {
 		b = slob_alloc(c->size, flags, c->align, node);
-	else
+		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
+					    SLOB_UNITS(c->size) * SLOB_UNIT,
+					    flags, node);
+	} else {
 		b = slob_new_pages(flags, get_order(c->size), node);
+		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
+					    PAGE_SIZE << get_order(c->size),
+					    flags, node);
+	}
 
 	if (c->ctor)
 		c->ctor(b);
@@ -622,6 +641,8 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
 	} else {
 		__kmem_cache_free(b, c->size);
 	}
+
+	trace_kmem_cache_free(_RET_IP_, b);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
diff --git a/mm/slub.c b/mm/slub.c
index c4ea9158c9fb..7ab54ecbd3f3 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -16,6 +16,7 @@
 #include <linux/slab.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <trace/kmemtrace.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
 #include <linux/mempolicy.h>
@@ -1618,18 +1619,45 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return slab_alloc(s, gfpflags, -1, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
+
+	trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+{
+	return slab_alloc(s, gfpflags, -1, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+#endif
+
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	return slab_alloc(s, gfpflags, node, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+
+	trace_kmem_cache_alloc_node(_RET_IP_, ret,
+				    s->objsize, s->size, gfpflags, node);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+				    gfp_t gfpflags,
+				    int node)
+{
+	return slab_alloc(s, gfpflags, node, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+#endif
+
 /*
  * Slow patch handling. This may still be called frequently since objects
  * have a longer lifetime than the cpu slabs in most processing loads.
@@ -1737,6 +1765,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 	page = virt_to_head_page(x);
 
 	slab_free(s, page, x, _RET_IP_);
+
+	trace_kmem_cache_free(_RET_IP_, x);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -2659,6 +2689,7 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s;
+	void *ret;
 
 	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, flags);
@@ -2668,7 +2699,11 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, -1, _RET_IP_);
+	ret = slab_alloc(s, flags, -1, _RET_IP_);
+
+	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2687,16 +2722,28 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *s;
+	void *ret;
 
-	if (unlikely(size > SLUB_MAX_SIZE))
-		return kmalloc_large_node(size, flags, node);
+	if (unlikely(size > SLUB_MAX_SIZE)) {
+		ret = kmalloc_large_node(size, flags, node);
+
+		trace_kmalloc_node(_RET_IP_, ret,
+				   size, PAGE_SIZE << get_order(size),
+				   flags, node);
+
+		return ret;
+	}
 
 	s = get_slab(size, flags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, node, _RET_IP_);
+	ret = slab_alloc(s, flags, node, _RET_IP_);
+
+	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2745,6 +2792,8 @@ void kfree(const void *x)
 	struct page *page;
 	void *object = (void *)x;
 
+	trace_kfree(_RET_IP_, x);
+
 	if (unlikely(ZERO_OR_NULL_PTR(x)))
 		return;
 
@@ -3224,6 +3273,7 @@ static struct notifier_block __cpuinitdata slab_notifier = {
 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
 	struct kmem_cache *s;
+	void *ret;
 
 	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, gfpflags);
@@ -3233,13 +3283,19 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, gfpflags, -1, caller);
+	ret = slab_alloc(s, gfpflags, -1, caller);
+
+	/* Honor the call site pointer we recieved. */
+	trace_kmalloc(caller, ret, size, s->size, gfpflags);
+
+	return ret;
 }
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 				  int node, unsigned long caller)
 {
 	struct kmem_cache *s;
+	void *ret;
 
 	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large_node(size, gfpflags, node);
@@ -3249,7 +3305,12 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, gfpflags, node, caller);
+	ret = slab_alloc(s, gfpflags, node, caller);
+
+	/* Honor the call site pointer we recieved. */
+	trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
+
+	return ret;
 }
 
 #ifdef CONFIG_SLUB_DEBUG
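
In the two *_track_caller hunks the trace call passes caller rather than _RET_IP_, so wrappers such as kstrdup() attribute the allocation event to their own caller instead of to the wrapper. An illustrative sketch of the pattern those wrappers rely on (not from the diff; kmalloc_track_caller() expands to __kmalloc_track_caller(size, flags, _RET_IP_) when caller tracking is enabled):

#include <linux/slab.h>
#include <linux/string.h>

/* Illustrative only: a kstrdup-style wrapper; with the hunks above, the
 * resulting kmalloc event records this function's caller, not the wrapper. */
static char *example_strdup(const char *s, gfp_t gfp)
{
	size_t len = strlen(s) + 1;
	char *p = kmalloc_track_caller(len, gfp);

	if (p)
		memcpy(p, s, len);
	return p;
}
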
diff --git a/mm/util.c b/mm/util.c
index 7c122e49f769..2599e83eea17 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -4,6 +4,7 @@
 #include <linux/module.h>
 #include <linux/err.h>
 #include <linux/sched.h>
+#include <linux/tracepoint.h>
 #include <asm/uaccess.h>
 
 /**
@@ -236,3 +237,18 @@ int __attribute__((weak)) get_user_pages_fast(unsigned long start,
 	return ret;
 }
 EXPORT_SYMBOL_GPL(get_user_pages_fast);
+
+/* Tracepoints definitions. */
+DEFINE_TRACE(kmalloc);
+DEFINE_TRACE(kmem_cache_alloc);
+DEFINE_TRACE(kmalloc_node);
+DEFINE_TRACE(kmem_cache_alloc_node);
+DEFINE_TRACE(kfree);
+DEFINE_TRACE(kmem_cache_free);
+
+EXPORT_TRACEPOINT_SYMBOL(kmalloc);
+EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
+EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
+EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
+EXPORT_TRACEPOINT_SYMBOL(kfree);
+EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);
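
DEFINE_TRACE() provides the single definition of each tracepoint declared in trace/kmemtrace.h, and EXPORT_TRACEPOINT_SYMBOL() lets modules attach probes to them. A hedged sketch of a consumer module, assuming the probe prototype matches the kmalloc tracepoint declaration (call site, returned pointer, requested and allocated sizes, GFP flags):

#include <linux/module.h>
#include <trace/kmemtrace.h>

/* Illustrative probe: the arguments must match the kmalloc tracepoint
 * prototype declared in trace/kmemtrace.h. */
static void example_probe_kmalloc(unsigned long call_site, const void *ptr,
				  size_t bytes_req, size_t bytes_alloc,
				  gfp_t gfp_flags)
{
	pr_debug("kmalloc: %zu/%zu bytes from %pS\n",
		 bytes_req, bytes_alloc, (void *)call_site);
}

static int __init example_init(void)
{
	return register_trace_kmalloc(example_probe_kmalloc);
}

static void __exit example_exit(void)
{
	unregister_trace_kmalloc(example_probe_kmalloc);
	tracepoint_synchronize_unregister();
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
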
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 425244988bb2..39fdfb14eeaa 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1967,7 +1967,7 @@ static int kswapd(void *p)
 	struct reclaim_state reclaim_state = {
 		.reclaimed_slab = 0,
 	};
-	node_to_cpumask_ptr(cpumask, pgdat->node_id);
+	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
 
 	lockdep_set_current_reclaim_state(GFP_KERNEL);
 
@@ -2204,7 +2204,9 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
 	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
 		for_each_node_state(nid, N_HIGH_MEMORY) {
 			pg_data_t *pgdat = NODE_DATA(nid);
-			node_to_cpumask_ptr(mask, pgdat->node_id);
+			const struct cpumask *mask;
+
+			mask = cpumask_of_node(pgdat->node_id);
 
 			if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
 				/* One of our CPUs online: restore mask */