-rw-r--r--  include/linux/gfp.h        | 2 ++
-rw-r--r--  include/linux/memcontrol.h | 4 ++++
-rw-r--r--  mm/kmemleak.c              | 3 ++-
3 files changed, 8 insertions(+), 1 deletion(-)
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 97a9373e61e8..15928f0647e4 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -30,6 +30,7 @@ struct vm_area_struct;
 #define ___GFP_HARDWALL		0x20000u
 #define ___GFP_THISNODE		0x40000u
 #define ___GFP_RECLAIMABLE	0x80000u
+#define ___GFP_NOACCOUNT	0x100000u
 #define ___GFP_NOTRACK		0x200000u
 #define ___GFP_NO_KSWAPD	0x400000u
 #define ___GFP_OTHER_NODE	0x800000u
@@ -87,6 +88,7 @@ struct vm_area_struct;
 #define __GFP_HARDWALL   ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */
 #define __GFP_THISNODE	((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */
 #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
+#define __GFP_NOACCOUNT	((__force gfp_t)___GFP_NOACCOUNT) /* Don't account to kmemcg */
 #define __GFP_NOTRACK	((__force gfp_t)___GFP_NOTRACK)  /* Don't track with kmemcheck */
 
 #define __GFP_NO_KSWAPD	((__force gfp_t)___GFP_NO_KSWAPD)
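
These two hunks reserve a new flag bit (0x100000u) and expose it as __GFP_NOACCOUNT, letting an individual allocation opt out of kmem cgroup accounting. A minimal sketch of intended usage; the helper below is purely illustrative and not part of this patch:

	#include <linux/slab.h>
	#include <linux/gfp.h>

	/* Illustrative only: allocate a buffer without charging it to the
	 * current task's kmem cgroup, using the flag added above. */
	static void *noaccount_buf_alloc(size_t size)
	{
		return kmalloc(size, GFP_KERNEL | __GFP_NOACCOUNT);
	}
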
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 72dff5fb0d0c..6c8918114804 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -463,6 +463,8 @@ memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
 	if (!memcg_kmem_enabled())
 		return true;
 
+	if (gfp & __GFP_NOACCOUNT)
+		return true;
 	/*
 	 * __GFP_NOFAIL allocations will move on even if charging is not
 	 * possible. Therefore we don't even try, and have this allocation
@@ -522,6 +524,8 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
 {
 	if (!memcg_kmem_enabled())
 		return cachep;
+	if (gfp & __GFP_NOACCOUNT)
+		return cachep;
 	if (gfp & __GFP_NOFAIL)
 		return cachep;
 	if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
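
With these checks in place, both kmemcg entry points short-circuit for __GFP_NOACCOUNT allocations before any cgroup lookup: memcg_kmem_newpage_charge() reports success without charging anything, and memcg_kmem_get_cache() hands back the root cache instead of a per-memcg clone. The contrived sketch below only exercises the two helpers, using the signatures visible in the hunk headers, to show that behaviour; it is not code from the patch:

	#include <linux/memcontrol.h>
	#include <linux/slab.h>

	/* Illustrative only: for a __GFP_NOACCOUNT allocation, no charge is
	 * attempted and the original (root) cache is used as-is. */
	static struct kmem_cache *noaccount_pick_cache(struct kmem_cache *cachep)
	{
		gfp_t gfp = GFP_KERNEL | __GFP_NOACCOUNT;
		struct mem_cgroup *memcg = NULL;

		/* Returns true immediately; memcg is left untouched. */
		if (!memcg_kmem_newpage_charge(gfp, &memcg, 0))
			return NULL;

		/* Returns cachep itself, never a per-memcg clone. */
		return memcg_kmem_get_cache(cachep, gfp);
	}
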
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 5405aff5a590..f0fe4f2c1fa7 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -115,7 +115,8 @@
 #define BYTES_PER_POINTER	sizeof(void *)
 
 /* GFP bitmask for kmemleak internal allocations */
-#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
+#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC | \
+					   __GFP_NOACCOUNT)) | \
 				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
 				 __GFP_NOWARN)
 
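
kmemleak allocates its tracking metadata with the masked gfp of the allocation it is tracking, so with __GFP_NOACCOUNT now preserved by gfp_kmemleak_mask(), metadata created for an unaccounted allocation is itself not charged to kmemcg. A simplified sketch of that metadata allocation (roughly how kmemleak's create_object() uses the mask; not the exact mainline code):

	/* Sketch: the tracked allocation's gfp, masked, is reused for the
	 * kmemleak object; __GFP_NOACCOUNT survives the mask above. */
	object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));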