Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c	  5
-rw-r--r--	mm/slab.c	 75
-rw-r--r--	mm/slob.c	 39
-rw-r--r--	mm/slub.c	 98
-rw-r--r--	mm/vmscan.c	  2
5 files changed, 191 insertions(+), 28 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5c44ed49ca93..a3803ea8c27d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1479,6 +1479,8 @@ __alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
 	unsigned long did_some_progress;
 	unsigned long pages_reclaimed = 0;
 
+	lockdep_trace_alloc(gfp_mask);
+
 	might_sleep_if(wait);
 
 	if (should_fail_alloc_page(gfp_mask, order))
@@ -1578,12 +1580,15 @@ nofail_alloc:
 	 */
 	cpuset_update_task_memory_state();
 	p->flags |= PF_MEMALLOC;
+
+	lockdep_set_current_reclaim_state(gfp_mask);
 	reclaim_state.reclaimed_slab = 0;
 	p->reclaim_state = &reclaim_state;
 
 	did_some_progress = try_to_free_pages(zonelist, order, gfp_mask);
 
 	p->reclaim_state = NULL;
+	lockdep_clear_current_reclaim_state();
 	p->flags &= ~PF_MEMALLOC;
 
 	cond_resched();
diff --git a/mm/slab.c b/mm/slab.c
index 4d00855629c4..9ec66c3e6ee0 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -102,6 +102,7 @@
 #include	<linux/cpu.h>
 #include	<linux/sysctl.h>
 #include	<linux/module.h>
+#include	<trace/kmemtrace.h>
 #include	<linux/rcupdate.h>
 #include	<linux/string.h>
 #include	<linux/uaccess.h>
@@ -568,6 +569,14 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 
 #endif
 
+#ifdef CONFIG_KMEMTRACE
+size_t slab_buffer_size(struct kmem_cache *cachep)
+{
+	return cachep->buffer_size;
+}
+EXPORT_SYMBOL(slab_buffer_size);
+#endif
+
 /*
  * Do not go above this order unless 0 objects fit into the slab.
  */
@@ -3318,6 +3327,8 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	unsigned long save_flags;
 	void *ptr;
 
+	lockdep_trace_alloc(flags);
+
 	if (slab_should_failslab(cachep, flags))
 		return NULL;
 
@@ -3394,6 +3405,8 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 	unsigned long save_flags;
 	void *objp;
 
+	lockdep_trace_alloc(flags);
+
 	if (slab_should_failslab(cachep, flags))
 		return NULL;
 
@@ -3550,10 +3563,23 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
  */
 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
-	return __cache_alloc(cachep, flags, __builtin_return_address(0));
+	void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+			     obj_size(cachep), cachep->buffer_size, flags);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+{
+	return __cache_alloc(cachep, flags, __builtin_return_address(0));
+}
+EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+#endif
+
 /**
  * kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
  * @cachep: the cache we're checking against
@@ -3598,23 +3624,47 @@ out:
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
-	return __cache_alloc_node(cachep, flags, nodeid,
-			__builtin_return_address(0));
+	void *ret = __cache_alloc_node(cachep, flags, nodeid,
+				       __builtin_return_address(0));
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+				  obj_size(cachep), cachep->buffer_size,
+				  flags, nodeid);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+				    gfp_t flags,
+				    int nodeid)
+{
+	return __cache_alloc_node(cachep, flags, nodeid,
+				  __builtin_return_address(0));
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+#endif
+
 static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 {
 	struct kmem_cache *cachep;
+	void *ret;
 
 	cachep = kmem_find_general_cachep(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	return kmem_cache_alloc_node(cachep, flags, node);
+	ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+				  (unsigned long) caller, ret,
+				  size, cachep->buffer_size, flags, node);
+
+	return ret;
 }
 
-#ifdef CONFIG_DEBUG_SLAB
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	return __do_kmalloc_node(size, flags, node,
@@ -3647,6 +3697,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 					  void *caller)
 {
 	struct kmem_cache *cachep;
+	void *ret;
 
 	/* If you want to save a few bytes .text space: replace
 	 * __ with kmem_.
@@ -3656,11 +3707,17 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 	cachep = __find_general_cachep(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	return __cache_alloc(cachep, flags, caller);
+	ret = __cache_alloc(cachep, flags, caller);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
+			     (unsigned long) caller, ret,
+			     size, cachep->buffer_size, flags);
+
+	return ret;
 }
 
 
-#ifdef CONFIG_DEBUG_SLAB
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	return __do_kmalloc(size, flags, __builtin_return_address(0));
@@ -3699,6 +3756,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 	debug_check_no_obj_freed(objp, obj_size(cachep));
 	__cache_free(cachep, objp);
 	local_irq_restore(flags);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, objp);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -3725,6 +3784,8 @@ void kfree(const void *objp)
 	debug_check_no_obj_freed(objp, obj_size(c));
 	__cache_free(c, (void *)objp);
 	local_irq_restore(flags);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, objp);
 }
 EXPORT_SYMBOL(kfree);
 
diff --git a/mm/slob.c b/mm/slob.c
index 52bc8a2bd9ef..596152926a8d 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -65,6 +65,7 @@
 #include <linux/module.h>
 #include <linux/rcupdate.h>
 #include <linux/list.h>
+#include <trace/kmemtrace.h>
 #include <asm/atomic.h>
 
 /*
@@ -463,27 +464,40 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 {
 	unsigned int *m;
 	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+	void *ret;
+
+	lockdep_trace_alloc(gfp);
 
 	if (size < PAGE_SIZE - align) {
 		if (!size)
 			return ZERO_SIZE_PTR;
 
 		m = slob_alloc(size + align, gfp, align, node);
+
 		if (!m)
 			return NULL;
 		*m = size;
-		return (void *)m + align;
+		ret = (void *)m + align;
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _RET_IP_, ret,
+					  size, size + align, gfp, node);
 	} else {
-		void *ret;
+		unsigned int order = get_order(size);
 
-		ret = slob_new_page(gfp | __GFP_COMP, get_order(size), node);
+		ret = slob_new_page(gfp | __GFP_COMP, order, node);
 		if (ret) {
 			struct page *page;
 			page = virt_to_page(ret);
 			page->private = size;
 		}
-		return ret;
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _RET_IP_, ret,
+					  size, PAGE_SIZE << order, gfp, node);
 	}
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 
@@ -501,6 +515,8 @@ void kfree(const void *block)
 		slob_free(m, *m + align);
 	} else
 		put_page(&sp->page);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, block);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -570,10 +586,19 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
 	void *b;
 
-	if (c->size < PAGE_SIZE)
+	if (c->size < PAGE_SIZE) {
 		b = slob_alloc(c->size, flags, c->align, node);
-	else
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
+					  _RET_IP_, b, c->size,
+					  SLOB_UNITS(c->size) * SLOB_UNIT,
+					  flags, node);
+	} else {
 		b = slob_new_page(flags, get_order(c->size), node);
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
+					  _RET_IP_, b, c->size,
+					  PAGE_SIZE << get_order(c->size),
+					  flags, node);
+	}
 
 	if (c->ctor)
 		c->ctor(b);
@@ -609,6 +634,8 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
 	} else {
 		__kmem_cache_free(b, c->size);
 	}
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, b);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
diff --git a/mm/slub.c b/mm/slub.c
index 0280eee6cf37..816734ed8aa3 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -16,6 +16,7 @@
 #include <linux/slab.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <trace/kmemtrace.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
 #include <linux/mempolicy.h>
@@ -1596,6 +1597,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	unsigned long flags;
 	unsigned int objsize;
 
+	lockdep_trace_alloc(gfpflags);
 	might_sleep_if(gfpflags & __GFP_WAIT);
 
 	if (should_failslab(s->objsize, gfpflags))
@@ -1623,18 +1625,46 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return slab_alloc(s, gfpflags, -1, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+			     s->objsize, s->size, gfpflags);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+{
+	return slab_alloc(s, gfpflags, -1, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+#endif
+
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	return slab_alloc(s, gfpflags, node, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+				  s->objsize, s->size, gfpflags, node);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+				    gfp_t gfpflags,
+				    int node)
+{
+	return slab_alloc(s, gfpflags, node, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+#endif
+
 /*
  * Slow patch handling. This may still be called frequently since objects
  * have a longer lifetime than the cpu slabs in most processing loads.
@@ -1742,6 +1772,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 	page = virt_to_head_page(x);
 
 	slab_free(s, page, x, _RET_IP_);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -2475,7 +2507,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
  * Kmalloc subsystem
  *******************************************************************/
 
-struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 static int __init setup_slub_min_order(char *str)
@@ -2537,7 +2569,7 @@ panic:
 }
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
+static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT];
 
 static void sysfs_add_func(struct work_struct *w)
 {
@@ -2657,8 +2689,9 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s;
+	void *ret;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
@@ -2666,7 +2699,12 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, -1, _RET_IP_);
+	ret = slab_alloc(s, flags, -1, _RET_IP_);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+			     size, s->size, flags);
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2685,16 +2723,30 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *s;
+	void *ret;
+
+	if (unlikely(size > SLUB_MAX_SIZE)) {
+		ret = kmalloc_large_node(size, flags, node);
 
-	if (unlikely(size > PAGE_SIZE))
-		return kmalloc_large_node(size, flags, node);
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _RET_IP_, ret,
+					  size, PAGE_SIZE << get_order(size),
+					  flags, node);
+
+		return ret;
+	}
 
 	s = get_slab(size, flags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, node, _RET_IP_);
+	ret = slab_alloc(s, flags, node, _RET_IP_);
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+				  size, s->size, flags, node);
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2753,6 +2805,8 @@ void kfree(const void *x)
 		return;
 	}
 	slab_free(page->slab, page, object, _RET_IP_);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -2986,7 +3040,7 @@ void __init kmem_cache_init(void)
 		caches++;
 	}
 
-	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
 		create_kmalloc_cache(&kmalloc_caches[i],
 			"kmalloc", 1 << i, GFP_KERNEL);
 		caches++;
@@ -3023,7 +3077,7 @@ void __init kmem_cache_init(void)
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
 		kmalloc_caches[i]. name =
 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
 
@@ -3222,8 +3276,9 @@ static struct notifier_block __cpuinitdata slab_notifier = {
 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
 	struct kmem_cache *s;
+	void *ret;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, gfpflags);
 
 	s = get_slab(size, gfpflags);
@@ -3231,15 +3286,22 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, gfpflags, -1, caller);
+	ret = slab_alloc(s, gfpflags, -1, caller);
+
+	/* Honor the call site pointer we recieved. */
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, caller, ret, size,
+			     s->size, gfpflags);
+
+	return ret;
 }
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 				  int node, unsigned long caller)
 {
 	struct kmem_cache *s;
+	void *ret;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large_node(size, gfpflags, node);
 
 	s = get_slab(size, gfpflags);
@@ -3247,7 +3309,13 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, gfpflags, node, caller);
+	ret = slab_alloc(s, gfpflags, node, caller);
+
+	/* Honor the call site pointer we recieved. */
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, caller, ret,
+				  size, s->size, gfpflags, node);
+
+	return ret;
 }
 
 #ifdef CONFIG_SLUB_DEBUG
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 6177e3bcd66b..ae6f4c174a12 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1965,6 +1965,8 @@ static int kswapd(void *p)
 	};
 	node_to_cpumask_ptr(cpumask, pgdat->node_id);
 
+	lockdep_set_current_reclaim_state(GFP_KERNEL);
+
 	if (!cpumask_empty(cpumask))
 		set_cpus_allowed_ptr(tsk, cpumask);
 	current->reclaim_state = &reclaim_state;