-rw-r--r--  include/linux/slab.h      |  17
-rw-r--r--  include/linux/slab_def.h  |   2
-rw-r--r--  include/linux/slub_def.h  |   2
-rw-r--r--  mm/memcontrol.c           |  11
-rw-r--r--  mm/slab.h                 |  48
-rw-r--r--  mm/slab_common.c          | 129
-rw-r--r--  mm/slub.c                 |   5

7 files changed, 111 insertions(+), 103 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 2e3b448cfa2d..1e03c11bbfbd 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -473,14 +473,14 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 #ifndef ARCH_SLAB_MINALIGN
 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
 #endif
+
+struct memcg_cache_array {
+        struct rcu_head rcu;
+        struct kmem_cache *entries[0];
+};
+
 /*
  * This is the main placeholder for memcg-related information in kmem caches.
- * struct kmem_cache will hold a pointer to it, so the memory cost while
- * disabled is 1 pointer. The runtime cost while enabled, gets bigger than it
- * would otherwise be if that would be bundled in kmem_cache: we'll need an
- * extra pointer chase. But the trade off clearly lays in favor of not
- * penalizing non-users.
- *
  * Both the root cache and the child caches will have it. For the root cache,
  * this will hold a dynamically allocated array large enough to hold
  * information about the currently limited memcgs in the system. To allow the
@@ -495,10 +495,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 struct memcg_cache_params {
         bool is_root_cache;
         union {
-                struct {
-                        struct rcu_head rcu_head;
-                        struct kmem_cache *memcg_caches[0];
-                };
+                struct memcg_cache_array __rcu *memcg_caches;
                 struct {
                         struct mem_cgroup *memcg;
                         struct kmem_cache *root_cache;
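
[Note: the union above replaces the old flexible-array trick with an explicit memcg_cache_array. The following is a sketch, not part of the patch, of how its two arms are populated: a root cache owns the RCU-protected array, a child cache keeps back-pointers. The helper names are hypothetical.]

/* Hypothetical illustration of the reworked memcg_cache_params. */
static void sketch_init_root(struct kmem_cache *s)
{
        s->memcg_params.is_root_cache = true;
        /* Array is allocated lazily, once kmem-limited memcgs exist. */
        RCU_INIT_POINTER(s->memcg_params.memcg_caches, NULL);
}

static void sketch_init_child(struct kmem_cache *s, struct mem_cgroup *memcg,
                              struct kmem_cache *root)
{
        s->memcg_params.is_root_cache = false;
        s->memcg_params.memcg = memcg;          /* owner cgroup */
        s->memcg_params.root_cache = root;      /* back-pointer to root */
}

[Embedding the struct into kmem_cache also removes the "memcg_params == NULL" case that is_root_cache() used to test for, as the mm/slab.h hunks below show.]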
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index b869d1662ba3..33d049066c3d 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -70,7 +70,7 @@ struct kmem_cache {
         int obj_offset;
 #endif /* CONFIG_DEBUG_SLAB */
 #ifdef CONFIG_MEMCG_KMEM
-        struct memcg_cache_params *memcg_params;
+        struct memcg_cache_params memcg_params;
 #endif
 
         struct kmem_cache_node *node[MAX_NUMNODES];
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index d82abd40a3c0..9abf04ed0999 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -85,7 +85,7 @@ struct kmem_cache {
         struct kobject kobj;    /* For sysfs */
 #endif
 #ifdef CONFIG_MEMCG_KMEM
-        struct memcg_cache_params *memcg_params;
+        struct memcg_cache_params memcg_params;
         int max_attr_size; /* for propagation, maximum size of a stored attr */
 #ifdef CONFIG_SYSFS
         struct kset *memcg_kset;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index afa55bb38cbd..6f3c0fcd7a2d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -332,7 +332,7 @@ struct mem_cgroup {
         struct cg_proto tcp_mem;
 #endif
 #if defined(CONFIG_MEMCG_KMEM)
-        /* Index in the kmem_cache->memcg_params->memcg_caches array */
+        /* Index in the kmem_cache->memcg_params.memcg_caches array */
         int kmemcg_id;
 #endif
 
@@ -531,7 +531,7 @@ static void disarm_sock_keys(struct mem_cgroup *memcg)
 
 #ifdef CONFIG_MEMCG_KMEM
 /*
- * This will be the memcg's index in each cache's ->memcg_params->memcg_caches.
+ * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
  * The main reason for not using cgroup id for this:
  * this works better in sparse environments, where we have a lot of memcgs,
  * but only a few kmem-limited. Or also, if we have, for instance, 200
@@ -2667,8 +2667,7 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
         struct mem_cgroup *memcg;
         struct kmem_cache *memcg_cachep;
 
-        VM_BUG_ON(!cachep->memcg_params);
-        VM_BUG_ON(!cachep->memcg_params->is_root_cache);
+        VM_BUG_ON(!is_root_cache(cachep));
 
         if (current->memcg_kmem_skip_account)
                 return cachep;
@@ -2702,7 +2701,7 @@ out:
 void __memcg_kmem_put_cache(struct kmem_cache *cachep)
 {
         if (!is_root_cache(cachep))
-                css_put(&cachep->memcg_params->memcg->css);
+                css_put(&cachep->memcg_params.memcg->css);
 }
 
 /*
@@ -2778,7 +2777,7 @@ struct mem_cgroup *__mem_cgroup_from_kmem(void *ptr)
         if (PageSlab(page)) {
                 cachep = page->slab_cache;
                 if (!is_root_cache(cachep))
-                        memcg = cachep->memcg_params->memcg;
+                        memcg = cachep->memcg_params.memcg;
         } else
                 /* page allocated by alloc_kmem_pages */
                 memcg = page->mem_cgroup;
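
[Note: kmemcg_id, per the comment patched above, is the slot this memcg occupies in every root cache's memcg_caches array. A hedged sketch of the lookup it enables follows; the wrapper name is hypothetical, while cache_from_memcg_idx() and memcg_cache_id() are helpers this patch touches.]

/* Hypothetical wrapper: map (root cache, memcg) to the cache to allocate
 * from, mirroring the __memcg_kmem_get_cache() flow. */
static struct kmem_cache *pick_cache_sketch(struct kmem_cache *root,
                                            struct mem_cgroup *memcg)
{
        struct kmem_cache *s;

        /* kmemcg_id indexes memcg_params.memcg_caches->entries[]. */
        s = cache_from_memcg_idx(root, memcg_cache_id(memcg));

        /* NULL means the per-memcg cache is not created yet; callers
         * fall back to the root cache in the meantime. */
        return s ? s : root;
}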
diff --git a/mm/slab.h b/mm/slab.h
index 90430d6f665e..53a623f85931 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -86,8 +86,6 @@ extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
 extern void create_boot_cache(struct kmem_cache *, const char *name,
                         size_t size, unsigned long flags);
 
-struct mem_cgroup;
-
 int slab_unmergeable(struct kmem_cache *s);
 struct kmem_cache *find_mergeable(size_t size, size_t align,
                 unsigned long flags, const char *name, void (*ctor)(void *));
@@ -167,14 +165,13 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 #ifdef CONFIG_MEMCG_KMEM
 static inline bool is_root_cache(struct kmem_cache *s)
 {
-        return !s->memcg_params || s->memcg_params->is_root_cache;
+        return s->memcg_params.is_root_cache;
 }
 
 static inline bool slab_equal_or_root(struct kmem_cache *s,
                                       struct kmem_cache *p)
 {
-        return (p == s) ||
-                (s->memcg_params && (p == s->memcg_params->root_cache));
+        return p == s || p == s->memcg_params.root_cache;
 }
 
 /*
@@ -185,37 +182,30 @@ static inline bool slab_equal_or_root(struct kmem_cache *s,
 static inline const char *cache_name(struct kmem_cache *s)
 {
         if (!is_root_cache(s))
-                return s->memcg_params->root_cache->name;
+                s = s->memcg_params.root_cache;
         return s->name;
 }
 
 /*
  * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
- * That said the caller must assure the memcg's cache won't go away. Since once
- * created a memcg's cache is destroyed only along with the root cache, it is
- * true if we are going to allocate from the cache or hold a reference to the
- * root cache by other means. Otherwise, we should hold either the slab_mutex
- * or the memcg's slab_caches_mutex while calling this function and accessing
- * the returned value.
+ * That said the caller must assure the memcg's cache won't go away by either
+ * taking a css reference to the owner cgroup, or holding the slab_mutex.
  */
 static inline struct kmem_cache *
 cache_from_memcg_idx(struct kmem_cache *s, int idx)
 {
         struct kmem_cache *cachep;
-        struct memcg_cache_params *params;
-
-        if (!s->memcg_params)
-                return NULL;
+        struct memcg_cache_array *arr;
 
         rcu_read_lock();
-        params = rcu_dereference(s->memcg_params);
+        arr = rcu_dereference(s->memcg_params.memcg_caches);
 
         /*
          * Make sure we will access the up-to-date value. The code updating
          * memcg_caches issues a write barrier to match this (see
-         * memcg_register_cache()).
+         * memcg_create_kmem_cache()).
          */
-        cachep = lockless_dereference(params->memcg_caches[idx]);
+        cachep = lockless_dereference(arr->entries[idx]);
         rcu_read_unlock();
 
         return cachep;
@@ -225,7 +215,7 @@ static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
 {
         if (is_root_cache(s))
                 return s;
-        return s->memcg_params->root_cache;
+        return s->memcg_params.root_cache;
 }
 
 static __always_inline int memcg_charge_slab(struct kmem_cache *s,
@@ -235,7 +225,7 @@ static __always_inline int memcg_charge_slab(struct kmem_cache *s,
                 return 0;
         if (is_root_cache(s))
                 return 0;
-        return memcg_charge_kmem(s->memcg_params->memcg, gfp, 1 << order);
+        return memcg_charge_kmem(s->memcg_params.memcg, gfp, 1 << order);
 }
 
 static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
@@ -244,9 +234,13 @@ static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
                 return;
         if (is_root_cache(s))
                 return;
-        memcg_uncharge_kmem(s->memcg_params->memcg, 1 << order);
+        memcg_uncharge_kmem(s->memcg_params.memcg, 1 << order);
 }
-#else
+
+extern void slab_init_memcg_params(struct kmem_cache *);
+
+#else /* !CONFIG_MEMCG_KMEM */
+
 static inline bool is_root_cache(struct kmem_cache *s)
 {
         return true;
@@ -282,7 +276,11 @@ static inline int memcg_charge_slab(struct kmem_cache *s, gfp_t gfp, int order)
 static inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
 {
 }
-#endif
+
+static inline void slab_init_memcg_params(struct kmem_cache *s)
+{
+}
+#endif /* CONFIG_MEMCG_KMEM */
 
 static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
 {
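
[Note: the RCU discipline behind cache_from_memcg_idx() above, restated as a commented sketch under the same assumptions: readers run under rcu_read_lock(), updaters publish whole replacement arrays while holding slab_mutex. The function name is hypothetical.]

/* Sketch of the read side: the array pointer, not the entries, is
 * RCU-protected, so the reference is only stable inside the read-side
 * critical section unless the caller pins the memcg or holds slab_mutex. */
static struct kmem_cache *read_slot_sketch(struct kmem_cache *root, int idx)
{
        struct memcg_cache_array *arr;
        struct kmem_cache *cachep = NULL;

        rcu_read_lock();
        arr = rcu_dereference(root->memcg_params.memcg_caches);
        if (arr)        /* NULL until the first kmem-limited memcg appears */
                cachep = arr->entries[idx];
        rcu_read_unlock();
        return cachep;
}

[As the patched comment says, what keeps the returned cache itself alive beyond the critical section is either a css reference on the owner cgroup or the slab_mutex.]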
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 23f5fcde6043..7cc32cf126ef 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -106,62 +106,66 @@ static inline int kmem_cache_sanity_check(const char *name, size_t size)
 #endif
 
 #ifdef CONFIG_MEMCG_KMEM
-static int memcg_alloc_cache_params(struct mem_cgroup *memcg,
-                struct kmem_cache *s, struct kmem_cache *root_cache)
+void slab_init_memcg_params(struct kmem_cache *s)
 {
-        size_t size;
+        s->memcg_params.is_root_cache = true;
+        RCU_INIT_POINTER(s->memcg_params.memcg_caches, NULL);
+}
+
+static int init_memcg_params(struct kmem_cache *s,
+                struct mem_cgroup *memcg, struct kmem_cache *root_cache)
+{
+        struct memcg_cache_array *arr;
 
-        if (!memcg_kmem_enabled())
+        if (memcg) {
+                s->memcg_params.is_root_cache = false;
+                s->memcg_params.memcg = memcg;
+                s->memcg_params.root_cache = root_cache;
                 return 0;
+        }
 
-        if (!memcg) {
-                size = offsetof(struct memcg_cache_params, memcg_caches);
-                size += memcg_nr_cache_ids * sizeof(void *);
-        } else
-                size = sizeof(struct memcg_cache_params);
+        slab_init_memcg_params(s);
 
-        s->memcg_params = kzalloc(size, GFP_KERNEL);
-        if (!s->memcg_params)
-                return -ENOMEM;
+        if (!memcg_nr_cache_ids)
+                return 0;
 
-        if (memcg) {
-                s->memcg_params->memcg = memcg;
-                s->memcg_params->root_cache = root_cache;
-        } else
-                s->memcg_params->is_root_cache = true;
+        arr = kzalloc(sizeof(struct memcg_cache_array) +
+                      memcg_nr_cache_ids * sizeof(void *),
+                      GFP_KERNEL);
+        if (!arr)
+                return -ENOMEM;
 
+        RCU_INIT_POINTER(s->memcg_params.memcg_caches, arr);
         return 0;
 }
 
-static void memcg_free_cache_params(struct kmem_cache *s)
+static void destroy_memcg_params(struct kmem_cache *s)
 {
-        kfree(s->memcg_params);
+        if (is_root_cache(s))
+                kfree(rcu_access_pointer(s->memcg_params.memcg_caches));
 }
 
-static int memcg_update_cache_params(struct kmem_cache *s, int num_memcgs)
+static int update_memcg_params(struct kmem_cache *s, int new_array_size)
 {
-        int size;
-        struct memcg_cache_params *new_params, *cur_params;
+        struct memcg_cache_array *old, *new;
 
-        BUG_ON(!is_root_cache(s));
-
-        size = offsetof(struct memcg_cache_params, memcg_caches);
-        size += num_memcgs * sizeof(void *);
+        if (!is_root_cache(s))
+                return 0;
 
-        new_params = kzalloc(size, GFP_KERNEL);
-        if (!new_params)
+        new = kzalloc(sizeof(struct memcg_cache_array) +
+                      new_array_size * sizeof(void *), GFP_KERNEL);
+        if (!new)
                 return -ENOMEM;
 
-        cur_params = s->memcg_params;
-        memcpy(new_params->memcg_caches, cur_params->memcg_caches,
-               memcg_nr_cache_ids * sizeof(void *));
-
-        new_params->is_root_cache = true;
-
-        rcu_assign_pointer(s->memcg_params, new_params);
-        if (cur_params)
-                kfree_rcu(cur_params, rcu_head);
+        old = rcu_dereference_protected(s->memcg_params.memcg_caches,
+                                        lockdep_is_held(&slab_mutex));
+        if (old)
+                memcpy(new->entries, old->entries,
+                       memcg_nr_cache_ids * sizeof(void *));
 
+        rcu_assign_pointer(s->memcg_params.memcg_caches, new);
+        if (old)
+                kfree_rcu(old, rcu);
         return 0;
 }
 
@@ -172,10 +176,7 @@ int memcg_update_all_caches(int num_memcgs)
 
         mutex_lock(&slab_mutex);
         list_for_each_entry(s, &slab_caches, list) {
-                if (!is_root_cache(s))
-                        continue;
-
-                ret = memcg_update_cache_params(s, num_memcgs);
+                ret = update_memcg_params(s, num_memcgs);
                 /*
                  * Instead of freeing the memory, we'll just leave the caches
                  * up to this point in an updated state.
@@ -187,13 +188,13 @@ int memcg_update_all_caches(int num_memcgs)
         return ret;
 }
 #else
-static inline int memcg_alloc_cache_params(struct mem_cgroup *memcg,
-                struct kmem_cache *s, struct kmem_cache *root_cache)
+static inline int init_memcg_params(struct kmem_cache *s,
+                struct mem_cgroup *memcg, struct kmem_cache *root_cache)
 {
         return 0;
 }
 
-static inline void memcg_free_cache_params(struct kmem_cache *s)
+static inline void destroy_memcg_params(struct kmem_cache *s)
 {
 }
 #endif /* CONFIG_MEMCG_KMEM */
@@ -311,7 +312,7 @@ do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align,
         s->align = align;
         s->ctor = ctor;
 
-        err = memcg_alloc_cache_params(memcg, s, root_cache);
+        err = init_memcg_params(s, memcg, root_cache);
         if (err)
                 goto out_free_cache;
 
@@ -327,7 +328,7 @@ out:
         return s;
 
 out_free_cache:
-        memcg_free_cache_params(s);
+        destroy_memcg_params(s);
         kmem_cache_free(kmem_cache, s);
         goto out;
 }
@@ -439,11 +440,15 @@ static int do_kmem_cache_shutdown(struct kmem_cache *s,
 
 #ifdef CONFIG_MEMCG_KMEM
         if (!is_root_cache(s)) {
-                struct kmem_cache *root_cache = s->memcg_params->root_cache;
-                int memcg_id = memcg_cache_id(s->memcg_params->memcg);
+                int idx;
+                struct memcg_cache_array *arr;
 
-                BUG_ON(root_cache->memcg_params->memcg_caches[memcg_id] != s);
-                root_cache->memcg_params->memcg_caches[memcg_id] = NULL;
+                idx = memcg_cache_id(s->memcg_params.memcg);
+                arr = rcu_dereference_protected(s->memcg_params.root_cache->
+                                                memcg_params.memcg_caches,
+                                                lockdep_is_held(&slab_mutex));
+                BUG_ON(arr->entries[idx] != s);
+                arr->entries[idx] = NULL;
         }
 #endif
         list_move(&s->list, release);
@@ -481,27 +486,32 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
                              struct kmem_cache *root_cache)
 {
         static char memcg_name_buf[NAME_MAX + 1]; /* protected by slab_mutex */
-        int memcg_id = memcg_cache_id(memcg);
+        struct memcg_cache_array *arr;
         struct kmem_cache *s = NULL;
         char *cache_name;
+        int idx;
 
         get_online_cpus();
         get_online_mems();
 
         mutex_lock(&slab_mutex);
 
+        idx = memcg_cache_id(memcg);
+        arr = rcu_dereference_protected(root_cache->memcg_params.memcg_caches,
+                                        lockdep_is_held(&slab_mutex));
+
         /*
          * Since per-memcg caches are created asynchronously on first
          * allocation (see memcg_kmem_get_cache()), several threads can try to
          * create the same cache, but only one of them may succeed.
          */
-        if (cache_from_memcg_idx(root_cache, memcg_id))
+        if (arr->entries[idx])
                 goto out_unlock;
 
         cgroup_name(mem_cgroup_css(memcg)->cgroup,
                     memcg_name_buf, sizeof(memcg_name_buf));
         cache_name = kasprintf(GFP_KERNEL, "%s(%d:%s)", root_cache->name,
-                               memcg_cache_id(memcg), memcg_name_buf);
+                               idx, memcg_name_buf);
         if (!cache_name)
                 goto out_unlock;
 
@@ -525,7 +535,7 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
          * initialized.
          */
         smp_wmb();
-        root_cache->memcg_params->memcg_caches[memcg_id] = s;
+        arr->entries[idx] = s;
 
 out_unlock:
         mutex_unlock(&slab_mutex);
@@ -545,7 +555,7 @@ void memcg_destroy_kmem_caches(struct mem_cgroup *memcg)
 
         mutex_lock(&slab_mutex);
         list_for_each_entry_safe(s, s2, &slab_caches, list) {
-                if (is_root_cache(s) || s->memcg_params->memcg != memcg)
+                if (is_root_cache(s) || s->memcg_params.memcg != memcg)
                         continue;
                 /*
                  * The cgroup is about to be freed and therefore has no charges
@@ -564,7 +574,7 @@ void memcg_destroy_kmem_caches(struct mem_cgroup *memcg)
 
 void slab_kmem_cache_release(struct kmem_cache *s)
 {
-        memcg_free_cache_params(s);
+        destroy_memcg_params(s);
         kfree(s->name);
         kmem_cache_free(kmem_cache, s);
 }
@@ -640,6 +650,9 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
         s->name = name;
         s->size = s->object_size = size;
         s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);
+
+        slab_init_memcg_params(s);
+
         err = __kmem_cache_create(s, flags);
 
         if (err)
@@ -980,7 +993,7 @@ int memcg_slab_show(struct seq_file *m, void *p)
 
         if (p == slab_caches.next)
                 print_slabinfo_header(m);
-        if (!is_root_cache(s) && s->memcg_params->memcg == memcg)
+        if (!is_root_cache(s) && s->memcg_params.memcg == memcg)
                 cache_show(s, m);
         return 0;
 }
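
[Note: the update side in update_memcg_params() above is the classic RCU copy-and-publish resize. A commented restatement follows, a sketch under the patch's assumption that slab_mutex serializes all updaters; the function name is hypothetical.]

/* Sketch: grow the entries[] array without blocking readers. */
static int grow_array_sketch(struct kmem_cache *root, int new_size)
{
        struct memcg_cache_array *old, *new;

        new = kzalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);
        if (!new)
                return -ENOMEM;

        /* slab_mutex serializes updaters, hence _protected, not _rcu. */
        old = rcu_dereference_protected(root->memcg_params.memcg_caches,
                                        lockdep_is_held(&slab_mutex));
        if (old)
                memcpy(new->entries, old->entries,
                       memcg_nr_cache_ids * sizeof(void *));

        rcu_assign_pointer(root->memcg_params.memcg_caches, new);
        if (old)
                kfree_rcu(old, rcu);    /* freed after a grace period */
        return 0;
}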
diff --git a/mm/slub.c b/mm/slub.c
index 8b8508adf9c2..75d55fdfe3a1 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3577,6 +3577,7 @@ static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
                 p->slab_cache = s;
 #endif
         }
+        slab_init_memcg_params(s);
         list_add(&s->list, &slab_caches);
         return s;
 }
@@ -4964,7 +4965,7 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
         if (is_root_cache(s))
                 return;
 
-        root_cache = s->memcg_params->root_cache;
+        root_cache = s->memcg_params.root_cache;
 
         /*
          * This mean this cache had no attribute written. Therefore, no point
@@ -5044,7 +5045,7 @@ static inline struct kset *cache_kset(struct kmem_cache *s)
 {
 #ifdef CONFIG_MEMCG_KMEM
         if (!is_root_cache(s))
-                return s->memcg_params->root_cache->memcg_kset;
+                return s->memcg_params.root_cache->memcg_kset;
 #endif
         return slab_kset;
 }
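
[Note: a minimal sketch of the sysfs routing that cache_kset() above implements: per-memcg caches register under their root cache's memcg_kset rather than the global slab kset. The function name is hypothetical, and /sys/kernel/slab as SLUB's usual mount point is an assumption noted for orientation.]

/* Sketch: pick the kset a cache's sysfs kobject is added to. */
static struct kset *sysfs_home_sketch(struct kmem_cache *s)
{
#ifdef CONFIG_MEMCG_KMEM
        if (!is_root_cache(s))  /* child: nest under the root cache */
                return s->memcg_params.root_cache->memcg_kset;
#endif
        return slab_kset;       /* root: /sys/kernel/slab */
}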