 include/linux/memcontrol.h | 15
 include/linux/slab.h       |  5
 mm/memcontrol.c            |  8
 mm/slab.h                  |  5
 mm/slab_common.c           |  3
 mm/slub.c                  |  2
 6 files changed, 26 insertions(+), 12 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index c9d9a8e7b45f..5c97265c1c6e 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -766,15 +766,13 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
 	return memcg ? memcg->kmemcg_id : -1;
 }
 
-struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep);
+struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);
 void __memcg_kmem_put_cache(struct kmem_cache *cachep);
 
-static inline bool __memcg_kmem_bypass(gfp_t gfp)
+static inline bool __memcg_kmem_bypass(void)
 {
 	if (!memcg_kmem_enabled())
 		return true;
-	if (!(gfp & __GFP_ACCOUNT))
-		return true;
 	if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
 		return true;
 	return false;
@@ -791,7 +789,9 @@ static inline bool __memcg_kmem_bypass(gfp_t gfp)
 static __always_inline int memcg_kmem_charge(struct page *page,
 					     gfp_t gfp, int order)
 {
-	if (__memcg_kmem_bypass(gfp))
+	if (__memcg_kmem_bypass())
+		return 0;
+	if (!(gfp & __GFP_ACCOUNT))
 		return 0;
 	return __memcg_kmem_charge(page, gfp, order);
 }
@@ -810,16 +810,15 @@ static __always_inline void memcg_kmem_uncharge(struct page *page, int order)
 /**
  * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation
  * @cachep: the original global kmem cache
- * @gfp: allocation flags.
  *
  * All memory allocated from a per-memcg cache is charged to the owner memcg.
  */
 static __always_inline struct kmem_cache *
 memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
 {
-	if (__memcg_kmem_bypass(gfp))
+	if (__memcg_kmem_bypass())
 		return cachep;
-	return __memcg_kmem_get_cache(cachep);
+	return __memcg_kmem_get_cache(cachep, gfp);
 }
 
 static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
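
Note: with the hunks above, __memcg_kmem_bypass() no longer inspects the gfp mask; the __GFP_ACCOUNT opt-in is now checked at the charge and cache-selection sites instead. The following is an illustrative sketch only, not part of this patch (the function and buffer names are made up), showing that a call site which wants its allocation charged to the current memcg still opts in by passing __GFP_ACCOUNT explicitly.

#include <linux/gfp.h>
#include <linux/slab.h>

/* Hypothetical helper, for illustration only. */
static void *alloc_accounted_scratch(size_t len)
{
	/*
	 * Without __GFP_ACCOUNT this allocation is not charged, since
	 * __memcg_kmem_bypass() no longer looks at the gfp flags.
	 */
	return kmalloc(len, GFP_KERNEL | __GFP_ACCOUNT);
}
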
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 2037a861e367..3ffee7422012 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -86,6 +86,11 @@
 #else
 # define SLAB_FAILSLAB		0x00000000UL
 #endif
+#ifdef CONFIG_MEMCG_KMEM
+# define SLAB_ACCOUNT		0x04000000UL	/* Account to memcg */
+#else
+# define SLAB_ACCOUNT		0x00000000UL
+#endif
 
 /* The following flags affect the page allocator grouping pages by mobility */
 #define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* Objects are reclaimable */
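
Note: a cache created with the new SLAB_ACCOUNT flag has all of its allocations charged to the allocating task's memcg, without each call site having to pass __GFP_ACCOUNT. The sketch below is illustrative only and not part of this patch; struct demo_obj and the cache name are hypothetical.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

struct demo_obj {
	unsigned long id;
};

static struct kmem_cache *demo_cachep;

static int __init demo_cache_init(void)
{
	/* Opt the whole cache into memcg accounting at creation time. */
	demo_cachep = kmem_cache_create("demo_obj", sizeof(struct demo_obj),
					0, SLAB_ACCOUNT, NULL);
	return demo_cachep ? 0 : -ENOMEM;
}
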
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 14cb1db4c52b..4bd6c4513393 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2356,7 +2356,7 @@ static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
  * Can't be called in interrupt context or from kernel threads.
  * This function needs to be called with rcu_read_lock() held.
  */
-struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
+struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
 {
 	struct mem_cgroup *memcg;
 	struct kmem_cache *memcg_cachep;
@@ -2364,6 +2364,12 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
 
 	VM_BUG_ON(!is_root_cache(cachep));
 
+	if (cachep->flags & SLAB_ACCOUNT)
+		gfp |= __GFP_ACCOUNT;
+
+	if (!(gfp & __GFP_ACCOUNT))
+		return cachep;
+
 	if (current->memcg_kmem_skip_account)
 		return cachep;
 
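
Note: the hunk above boils down to a simple routing rule. The helper below is a condensed restatement for illustration only; its name is hypothetical and it does not exist in the tree.

#include <linux/slab.h>

/*
 * Illustration only: a slab allocation is routed to (and charged
 * through) a per-memcg cache when either the cache was created with
 * SLAB_ACCOUNT or the caller passed __GFP_ACCOUNT.
 */
static inline bool slab_alloc_is_accounted(struct kmem_cache *cachep, gfp_t gfp)
{
	return (cachep->flags & SLAB_ACCOUNT) || (gfp & __GFP_ACCOUNT);
}
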
diff --git a/mm/slab.h b/mm/slab.h
index 7b6087197997..c63b8699cfa3 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -128,10 +128,11 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
 
 #if defined(CONFIG_SLAB)
 #define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
-			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | SLAB_NOTRACK)
+			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
+			  SLAB_NOTRACK | SLAB_ACCOUNT)
 #elif defined(CONFIG_SLUB)
 #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
-			  SLAB_TEMPORARY | SLAB_NOTRACK)
+			  SLAB_TEMPORARY | SLAB_NOTRACK | SLAB_ACCOUNT)
 #else
 #define SLAB_CACHE_FLAGS (0)
 #endif
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 3c6a86b4ec25..e016178063e1 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -37,7 +37,8 @@ struct kmem_cache *kmem_cache;
 		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
 		SLAB_FAILSLAB)
 
-#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | SLAB_NOTRACK)
+#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
+			 SLAB_NOTRACK | SLAB_ACCOUNT)
 
 /*
  * Merge control. If this is set then no merging of slab caches will occur.
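
Note: adding SLAB_ACCOUNT to SLAB_MERGE_SAME means two caches can only be merged if they agree on the flag, so accounted and unaccounted objects never share a slab. The sketch below is illustrative only and not part of this patch; the names, object size, and init function are made up.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

static struct kmem_cache *demo_plain, *demo_accounted;

static int __init demo_merge_init(void)
{
	/*
	 * Same object size and otherwise identical flags, but these two
	 * caches differ in a "must match" flag (SLAB_ACCOUNT), so the
	 * allocator will not merge them with each other.
	 */
	demo_plain = kmem_cache_create("demo_plain", 256, 0, 0, NULL);
	demo_accounted = kmem_cache_create("demo_accounted", 256, 0,
					   SLAB_ACCOUNT, NULL);
	if (!demo_plain || !demo_accounted)
		return -ENOMEM;
	return 0;
}
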
diff --git a/mm/slub.c b/mm/slub.c
index 46997517406e..2d0e610d195a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5362,6 +5362,8 @@ static char *create_unique_id(struct kmem_cache *s)
 		*p++ = 'F';
 	if (!(s->flags & SLAB_NOTRACK))
 		*p++ = 't';
+	if (s->flags & SLAB_ACCOUNT)
+		*p++ = 'A';
 	if (p != name + 1)
 		*p++ = '-';
 	p += sprintf(p, "%07d", s->size);