author     Glauber Costa <glommer@parallels.com>           2012-12-18 17:22:34 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-12-18 18:02:13 -0500
commit     2633d7a028239a738b793be5ca8fa6ac312f5793 (patch)
tree       48a9f157b2c2a8218611aaade9667cacc2e018ec /mm
parent     6ccfb5bcf52bcf100fa085946f044fdbba015048 (diff)
slab/slub: consider a memcg parameter in kmem_create_cache
Allow a memcg parameter to be passed during cache creation. When the slub
allocator is being used, it will only merge caches that belong to the same
memcg; we do this by scanning the global list and then translating the cache
to a memcg-specific cache.

A default function is created as a wrapper, passing NULL to the memcg
version, so existing kmem_cache_create() callers keep getting global (root)
caches.

A helper, memcg_cache_id(), is provided because slub needs a unique cache
name for sysfs. Since sysfs is visible, but is not the canonical location
for slab data, the cache name itself is not used there; the memcg id
suffices.

Signed-off-by: Glauber Costa <glommer@parallels.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Frederic Weisbecker <fweisbec@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: JoonSoo Kim <js1304@gmail.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Rik van Riel <riel@redhat.com>
Cc: Suleiman Souhlal <suleiman@google.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
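To make the new calling convention concrete, here is a minimal, self-contained user-space sketch of the wrapper pattern the patch introduces. The types and the body of kmem_cache_create_memcg() below are simplified stand-ins rather than the kernel implementation; only the signatures and the NULL-means-root-cache convention come from the patch.

/*
 * Simplified stand-in model (user-space C) of the wrapper pattern: the
 * legacy entry point forwards a NULL memcg, memcg-aware callers pass one
 * explicitly. Not kernel code; types are illustrative only.
 */
#include <stddef.h>
#include <stdio.h>

struct mem_cgroup { int kmemcg_id; };
struct kmem_cache { const char *name; const struct mem_cgroup *memcg; };

static struct kmem_cache *
kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name,
			size_t size, size_t align, unsigned long flags,
			void (*ctor)(void *))
{
	/* The real function allocates, registers and links the cache; this
	 * model only records which memcg (if any) the cache belongs to. */
	static struct kmem_cache cache;
	cache.name = name;
	cache.memcg = memcg;
	(void)size; (void)align; (void)flags; (void)ctor;
	return &cache;
}

static struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t align,
		  unsigned long flags, void (*ctor)(void *))
{
	/* Unchanged callers keep creating global (root) caches. */
	return kmem_cache_create_memcg(NULL, name, size, align, flags, ctor);
}

int main(void)
{
	struct kmem_cache *s = kmem_cache_create("demo", 128, 0, 0, NULL);
	printf("%s belongs to %s\n", s->name,
	       s->memcg ? "a memcg" : "the root");
	return 0;
}

In the patch itself the wrapper is exactly this thin; all of the memcg bookkeeping lives in kmem_cache_create_memcg() and the helpers added to mm/memcontrol.c below.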
Diffstat (limited to 'mm')
-rw-r--r--  mm/memcontrol.c   |  51
-rw-r--r--  mm/slab.h         |  23
-rw-r--r--  mm/slab_common.c  |  42
-rw-r--r--  mm/slub.c         |  19
4 files changed, 118 insertions(+), 17 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e16694d5e118..3eafe6cf6ca4 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -341,6 +341,14 @@ struct mem_cgroup {
 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
 	struct tcp_memcontrol tcp_mem;
 #endif
+#if defined(CONFIG_MEMCG_KMEM)
+	/* analogous to slab_common's slab_caches list. per-memcg */
+	struct list_head memcg_slab_caches;
+	/* Not a spinlock, we can take a lot of time walking the list */
+	struct mutex slab_caches_mutex;
+	/* Index in the kmem_cache->memcg_params->memcg_caches array */
+	int kmemcg_id;
+#endif
 };
 
 /* internal only representation about the status of kmem accounting. */
@@ -2785,6 +2793,47 @@ static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size)
 	mem_cgroup_put(memcg);
 }
 
+void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep)
+{
+	if (!memcg)
+		return;
+
+	mutex_lock(&memcg->slab_caches_mutex);
+	list_add(&cachep->memcg_params->list, &memcg->memcg_slab_caches);
+	mutex_unlock(&memcg->slab_caches_mutex);
+}
+
+/*
+ * helper for accessing a memcg's index. It will be used as an index in the
+ * child cache array in kmem_cache, and also to derive its name. This function
+ * will return -1 when this is not a kmem-limited memcg.
+ */
+int memcg_cache_id(struct mem_cgroup *memcg)
+{
+	return memcg ? memcg->kmemcg_id : -1;
+}
+
+int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s)
+{
+	size_t size = sizeof(struct memcg_cache_params);
+
+	if (!memcg_kmem_enabled())
+		return 0;
+
+	s->memcg_params = kzalloc(size, GFP_KERNEL);
+	if (!s->memcg_params)
+		return -ENOMEM;
+
+	if (memcg)
+		s->memcg_params->memcg = memcg;
+	return 0;
+}
+
+void memcg_release_cache(struct kmem_cache *s)
+{
+	kfree(s->memcg_params);
+}
+
 /*
  * We need to verify if the allocation against current->mm->owner's memcg is
  * possible for the given order. But the page is not allocated yet, so we'll
@@ -5026,7 +5075,9 @@ static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
 #ifdef CONFIG_MEMCG_KMEM
 static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 {
+	memcg->kmemcg_id = -1;
 	memcg_propagate_kmem(memcg);
+
 	return mem_cgroup_sockets_init(memcg, ss);
 };
 
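Taken together, the helpers added above form a small contract: memcg_cache_id() maps a memcg to its array index (-1 meaning not kmem-limited), memcg_register_cache() allocates the per-cache memcg_params and records the owning memcg, and memcg_release_cache() frees it on destruction. The following is a rough user-space model of that contract under simplified stand-in types; it is not the kernel code, and the real memcg_register_cache() additionally checks memcg_kmem_enabled() and uses kzalloc(GFP_KERNEL).

/*
 * Simplified model of the memcg_cache_id() / register / release contract.
 * Types are stand-ins; error handling mirrors the patch (-ENOMEM on
 * allocation failure, NULL memcg allowed for root caches).
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct mem_cgroup { int kmemcg_id; };
struct memcg_cache_params { struct mem_cgroup *memcg; };
struct kmem_cache { struct memcg_cache_params *memcg_params; };

static int memcg_cache_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;	/* -1: not kmem-limited */
}

static int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s)
{
	s->memcg_params = calloc(1, sizeof(*s->memcg_params));
	if (!s->memcg_params)
		return -ENOMEM;
	if (memcg)
		s->memcg_params->memcg = memcg;
	return 0;
}

static void memcg_release_cache(struct kmem_cache *s)
{
	free(s->memcg_params);
}

int main(void)
{
	struct mem_cgroup cg = { .kmemcg_id = 3 };
	struct kmem_cache s = { 0 };

	printf("root id: %d, memcg id: %d\n",
	       memcg_cache_id(NULL), memcg_cache_id(&cg));
	if (!memcg_register_cache(&cg, &s))
		memcg_release_cache(&s);
	return 0;
}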
diff --git a/mm/slab.h b/mm/slab.h
index 49e7a8b1d27e..abe582d20c79 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -43,12 +43,15 @@ extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
 extern void create_boot_cache(struct kmem_cache *, const char *name,
 			size_t size, unsigned long flags);
 
+struct mem_cgroup;
 #ifdef CONFIG_SLUB
-struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
-	size_t align, unsigned long flags, void (*ctor)(void *));
+struct kmem_cache *
+__kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
+		   size_t align, unsigned long flags, void (*ctor)(void *));
 #else
-static inline struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
-	size_t align, unsigned long flags, void (*ctor)(void *))
+static inline struct kmem_cache *
+__kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
+		   size_t align, unsigned long flags, void (*ctor)(void *))
 { return NULL; }
 #endif
 
@@ -106,11 +109,23 @@ static inline bool is_root_cache(struct kmem_cache *s)
 {
 	return !s->memcg_params || s->memcg_params->is_root_cache;
 }
+
+static inline bool cache_match_memcg(struct kmem_cache *cachep,
+				     struct mem_cgroup *memcg)
+{
+	return (is_root_cache(cachep) && !memcg) ||
+		(cachep->memcg_params->memcg == memcg);
+}
 #else
 static inline bool is_root_cache(struct kmem_cache *s)
 {
 	return true;
 }
 
+static inline bool cache_match_memcg(struct kmem_cache *cachep,
+				     struct mem_cgroup *memcg)
+{
+	return true;
+}
 #endif
 #endif
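The merge decision in slub keys on cache_match_memcg() above: a candidate matches either when it is a root cache and no memcg was requested, or when its memcg_params points at exactly the requested memcg. A small stand-alone illustration of that predicate follows (simplified stand-in types, CONFIG_MEMCG_KMEM branch only; the root cache is given explicit memcg_params here purely so the example stays NULL-safe).

/*
 * Stand-alone illustration of the cache_match_memcg() predicate from the
 * CONFIG_MEMCG_KMEM branch above. Types are simplified stand-ins.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct mem_cgroup { int kmemcg_id; };
struct memcg_cache_params { bool is_root_cache; struct mem_cgroup *memcg; };
struct kmem_cache { struct memcg_cache_params *memcg_params; };

static bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params || s->memcg_params->is_root_cache;
}

static bool cache_match_memcg(struct kmem_cache *cachep,
			      struct mem_cgroup *memcg)
{
	return (is_root_cache(cachep) && !memcg) ||
	       (cachep->memcg_params->memcg == memcg);
}

int main(void)
{
	struct mem_cgroup cg = { .kmemcg_id = 1 };
	struct memcg_cache_params root_params = { .is_root_cache = true };
	struct memcg_cache_params child_params = { .memcg = &cg };
	struct kmem_cache root = { .memcg_params = &root_params };
	struct kmem_cache per_memcg = { .memcg_params = &child_params };

	printf("root  vs NULL: %d\n", cache_match_memcg(&root, NULL));      /* 1 */
	printf("root  vs cg:   %d\n", cache_match_memcg(&root, &cg));       /* 0 */
	printf("child vs cg:   %d\n", cache_match_memcg(&per_memcg, &cg));  /* 1 */
	printf("child vs NULL: %d\n", cache_match_memcg(&per_memcg, NULL)); /* 0 */
	return 0;
}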
diff --git a/mm/slab_common.c b/mm/slab_common.c
index a8e76d79ee65..3031badcc577 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -18,6 +18,7 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/page.h>
+#include <linux/memcontrol.h>
 
 #include "slab.h"
 
@@ -27,7 +28,8 @@ DEFINE_MUTEX(slab_mutex);
 struct kmem_cache *kmem_cache;
 
 #ifdef CONFIG_DEBUG_VM
-static int kmem_cache_sanity_check(const char *name, size_t size)
+static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
+				   size_t size)
 {
 	struct kmem_cache *s = NULL;
 
@@ -53,7 +55,13 @@ static int kmem_cache_sanity_check(const char *name, size_t size)
 			continue;
 		}
 
-		if (!strcmp(s->name, name)) {
+		/*
+		 * For simplicity, we won't check this in the list of memcg
+		 * caches. We have control over memcg naming, and if there
+		 * aren't duplicates in the global list, there won't be any
+		 * duplicates in the memcg lists as well.
+		 */
+		if (!memcg && !strcmp(s->name, name)) {
 			pr_err("%s (%s): Cache name already exists.\n",
 			       __func__, name);
 			dump_stack();
@@ -66,7 +74,8 @@ static int kmem_cache_sanity_check(const char *name, size_t size)
 	return 0;
 }
 #else
-static inline int kmem_cache_sanity_check(const char *name, size_t size)
+static inline int kmem_cache_sanity_check(struct mem_cgroup *memcg,
+					  const char *name, size_t size)
 {
 	return 0;
 }
@@ -125,8 +134,9 @@ unsigned long calculate_alignment(unsigned long flags,
  * as davem.
  */
 
-struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align,
-		unsigned long flags, void (*ctor)(void *))
+struct kmem_cache *
+kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
+			size_t align, unsigned long flags, void (*ctor)(void *))
 {
 	struct kmem_cache *s = NULL;
 	int err = 0;
@@ -134,7 +144,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
 	get_online_cpus();
 	mutex_lock(&slab_mutex);
 
-	if (!kmem_cache_sanity_check(name, size) == 0)
+	if (!kmem_cache_sanity_check(memcg, name, size) == 0)
 		goto out_locked;
 
 	/*
@@ -145,7 +155,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
 	 */
 	flags &= CACHE_CREATE_MASK;
 
-	s = __kmem_cache_alias(name, size, align, flags, ctor);
+	s = __kmem_cache_alias(memcg, name, size, align, flags, ctor);
 	if (s)
 		goto out_locked;
 
@@ -154,6 +164,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
 		s->object_size = s->size = size;
 		s->align = calculate_alignment(flags, align, size);
 		s->ctor = ctor;
+
+		if (memcg_register_cache(memcg, s)) {
+			kmem_cache_free(kmem_cache, s);
+			err = -ENOMEM;
+			goto out_locked;
+		}
+
 		s->name = kstrdup(name, GFP_KERNEL);
 		if (!s->name) {
 			kmem_cache_free(kmem_cache, s);
@@ -163,10 +180,9 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
 
 		err = __kmem_cache_create(s, flags);
 		if (!err) {
-
 			s->refcount = 1;
 			list_add(&s->list, &slab_caches);
-
+			memcg_cache_list_add(memcg, s);
 		} else {
 			kfree(s->name);
 			kmem_cache_free(kmem_cache, s);
@@ -194,6 +210,13 @@ out_locked:
 
 	return s;
 }
+
+struct kmem_cache *
+kmem_cache_create(const char *name, size_t size, size_t align,
+		  unsigned long flags, void (*ctor)(void *))
+{
+	return kmem_cache_create_memcg(NULL, name, size, align, flags, ctor);
+}
 EXPORT_SYMBOL(kmem_cache_create);
 
 void kmem_cache_destroy(struct kmem_cache *s)
@@ -209,6 +232,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
 		if (s->flags & SLAB_DESTROY_BY_RCU)
 			rcu_barrier();
 
+		memcg_release_cache(s);
 		kfree(s->name);
 		kmem_cache_free(kmem_cache, s);
 	} else {
diff --git a/mm/slub.c b/mm/slub.c
index 87f9f32bf0cd..985332b38852 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -31,6 +31,7 @@
 #include <linux/fault-inject.h>
 #include <linux/stacktrace.h>
 #include <linux/prefetch.h>
+#include <linux/memcontrol.h>
 
 #include <trace/events/kmem.h>
 
@@ -3786,7 +3787,7 @@ static int slab_unmergeable(struct kmem_cache *s)
 	return 0;
 }
 
-static struct kmem_cache *find_mergeable(size_t size,
+static struct kmem_cache *find_mergeable(struct mem_cgroup *memcg, size_t size,
 		size_t align, unsigned long flags, const char *name,
 		void (*ctor)(void *))
 {
@@ -3822,17 +3823,21 @@ static struct kmem_cache *find_mergeable(size_t size,
 		if (s->size - size >= sizeof(void *))
 			continue;
 
+		if (!cache_match_memcg(s, memcg))
+			continue;
+
 		return s;
 	}
 	return NULL;
 }
 
-struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
-		size_t align, unsigned long flags, void (*ctor)(void *))
+struct kmem_cache *
+__kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
+		   size_t align, unsigned long flags, void (*ctor)(void *))
 {
 	struct kmem_cache *s;
 
-	s = find_mergeable(size, align, flags, name, ctor);
+	s = find_mergeable(memcg, size, align, flags, name, ctor);
 	if (s) {
 		s->refcount++;
 		/*
@@ -5156,6 +5161,12 @@ static char *create_unique_id(struct kmem_cache *s)
 	if (p != name + 1)
 		*p++ = '-';
 	p += sprintf(p, "%07d", s->size);
+
+#ifdef CONFIG_MEMCG_KMEM
+	if (!is_root_cache(s))
+		p += sprintf(p, "-%08d", memcg_cache_id(s->memcg_params->memcg));
+#endif
+
 	BUG_ON(p > name + ID_STR_LENGTH - 1);
 	return name;
 }
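Finally, the create_unique_id() hunk above is what keeps the sysfs alias names distinct once per-memcg copies of a cache exist: non-root caches get an extra "-%08d" suffix carrying memcg_cache_id(). A short stand-alone sketch of the resulting name shape follows; the leading ':' and the 't' flag character are assumed from slub's usual id format and the numeric values are invented, only the "%07d" and "-%08d" format strings come from the hunk.

/*
 * Sketch of the sysfs alias name built by create_unique_id() after this
 * patch: flag characters, then "%07d" for the object size, then an optional
 * "-%08d" memcg suffix for non-root caches. Values below are illustrative.
 */
#include <stdio.h>

int main(void)
{
	char name[64];
	char *p = name;
	int size = 192;		/* s->size (example) */
	int memcg_id = 4;	/* memcg_cache_id() of the owning memcg, or -1 */

	*p++ = ':';
	*p++ = 't';		/* example flag character */
	*p++ = '-';
	p += sprintf(p, "%07d", size);
	if (memcg_id >= 0)	/* i.e. !is_root_cache(s) */
		p += sprintf(p, "-%08d", memcg_id);
	*p = '\0';

	printf("%s\n", name);	/* prints ":t-0000192-00000004" */
	return 0;
}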