diff options
author | Vladimir Davydov <vdavydov@parallels.com> | 2015-02-12 17:59:20 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-02-12 21:54:09 -0500 |
commit | f7ce3190c4a35bf887adb7a1aa1ba899b679872d (patch) | |
tree | 8a40d93f1e796e1007d59e67541ce3044430b927 /mm/slab.h | |
parent | 49e7e7ff8d551b5b1e2f8da8497b9058cfa25672 (diff) |
slab: embed memcg_cache_params to kmem_cache
Currently, kmem_cache stores a pointer to struct memcg_cache_params
instead of embedding it. The rationale is to save memory when kmem
accounting is disabled. However, struct memcg_cache_params has shrunk
drastically since it was first introduced:
* Initially:
struct memcg_cache_params {
bool is_root_cache;
union {
struct kmem_cache *memcg_caches[0];
struct {
struct mem_cgroup *memcg;
struct list_head list;
struct kmem_cache *root_cache;
bool dead;
atomic_t nr_pages;
struct work_struct destroy;
};
};
};
* Now:
struct memcg_cache_params {
bool is_root_cache;
union {
struct {
struct rcu_head rcu_head;
struct kmem_cache *memcg_caches[0];
};
struct {
struct mem_cgroup *memcg;
struct kmem_cache *root_cache;
};
};
};
So the memory saving does not seem to be a clear win anymore.
On the other hand, keeping a pointer to struct memcg_cache_params instead of embedding
it results in touching one more cache line on kmem alloc/free hot paths.
Besides, it makes linking kmem caches in a list chained by a field of
struct memcg_cache_params really painful due to a level of indirection,
while I want to make them linked in the following patch. Therefore, let
us embed it.
Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Tejun Heo <tj@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slab.h')
-rw-r--r-- | mm/slab.h | 48 |
1 files changed, 23 insertions, 25 deletions
@@ -86,8 +86,6 @@ extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size, | |||
86 | extern void create_boot_cache(struct kmem_cache *, const char *name, | 86 | extern void create_boot_cache(struct kmem_cache *, const char *name, |
87 | size_t size, unsigned long flags); | 87 | size_t size, unsigned long flags); |
88 | 88 | ||
89 | struct mem_cgroup; | ||
90 | |||
91 | int slab_unmergeable(struct kmem_cache *s); | 89 | int slab_unmergeable(struct kmem_cache *s); |
92 | struct kmem_cache *find_mergeable(size_t size, size_t align, | 90 | struct kmem_cache *find_mergeable(size_t size, size_t align, |
93 | unsigned long flags, const char *name, void (*ctor)(void *)); | 91 | unsigned long flags, const char *name, void (*ctor)(void *)); |
@@ -167,14 +165,13 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer, | |||
167 | #ifdef CONFIG_MEMCG_KMEM | 165 | #ifdef CONFIG_MEMCG_KMEM |
168 | static inline bool is_root_cache(struct kmem_cache *s) | 166 | static inline bool is_root_cache(struct kmem_cache *s) |
169 | { | 167 | { |
170 | return !s->memcg_params || s->memcg_params->is_root_cache; | 168 | return s->memcg_params.is_root_cache; |
171 | } | 169 | } |
172 | 170 | ||
173 | static inline bool slab_equal_or_root(struct kmem_cache *s, | 171 | static inline bool slab_equal_or_root(struct kmem_cache *s, |
174 | struct kmem_cache *p) | 172 | struct kmem_cache *p) |
175 | { | 173 | { |
176 | return (p == s) || | 174 | return p == s || p == s->memcg_params.root_cache; |
177 | (s->memcg_params && (p == s->memcg_params->root_cache)); | ||
178 | } | 175 | } |
179 | 176 | ||
180 | /* | 177 | /* |
@@ -185,37 +182,30 @@ static inline bool slab_equal_or_root(struct kmem_cache *s, | |||
185 | static inline const char *cache_name(struct kmem_cache *s) | 182 | static inline const char *cache_name(struct kmem_cache *s) |
186 | { | 183 | { |
187 | if (!is_root_cache(s)) | 184 | if (!is_root_cache(s)) |
188 | return s->memcg_params->root_cache->name; | 185 | s = s->memcg_params.root_cache; |
189 | return s->name; | 186 | return s->name; |
190 | } | 187 | } |
191 | 188 | ||
192 | /* | 189 | /* |
193 | * Note, we protect with RCU only the memcg_caches array, not per-memcg caches. | 190 | * Note, we protect with RCU only the memcg_caches array, not per-memcg caches. |
194 | * That said the caller must assure the memcg's cache won't go away. Since once | 191 | * That said the caller must assure the memcg's cache won't go away by either |
195 | * created a memcg's cache is destroyed only along with the root cache, it is | 192 | * taking a css reference to the owner cgroup, or holding the slab_mutex. |
196 | * true if we are going to allocate from the cache or hold a reference to the | ||
197 | * root cache by other means. Otherwise, we should hold either the slab_mutex | ||
198 | * or the memcg's slab_caches_mutex while calling this function and accessing | ||
199 | * the returned value. | ||
200 | */ | 193 | */ |
201 | static inline struct kmem_cache * | 194 | static inline struct kmem_cache * |
202 | cache_from_memcg_idx(struct kmem_cache *s, int idx) | 195 | cache_from_memcg_idx(struct kmem_cache *s, int idx) |
203 | { | 196 | { |
204 | struct kmem_cache *cachep; | 197 | struct kmem_cache *cachep; |
205 | struct memcg_cache_params *params; | 198 | struct memcg_cache_array *arr; |
206 | |||
207 | if (!s->memcg_params) | ||
208 | return NULL; | ||
209 | 199 | ||
210 | rcu_read_lock(); | 200 | rcu_read_lock(); |
211 | params = rcu_dereference(s->memcg_params); | 201 | arr = rcu_dereference(s->memcg_params.memcg_caches); |
212 | 202 | ||
213 | /* | 203 | /* |
214 | * Make sure we will access the up-to-date value. The code updating | 204 | * Make sure we will access the up-to-date value. The code updating |
215 | * memcg_caches issues a write barrier to match this (see | 205 | * memcg_caches issues a write barrier to match this (see |
216 | * memcg_register_cache()). | 206 | * memcg_create_kmem_cache()). |
217 | */ | 207 | */ |
218 | cachep = lockless_dereference(params->memcg_caches[idx]); | 208 | cachep = lockless_dereference(arr->entries[idx]); |
219 | rcu_read_unlock(); | 209 | rcu_read_unlock(); |
220 | 210 | ||
221 | return cachep; | 211 | return cachep; |
@@ -225,7 +215,7 @@ static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s) | |||
225 | { | 215 | { |
226 | if (is_root_cache(s)) | 216 | if (is_root_cache(s)) |
227 | return s; | 217 | return s; |
228 | return s->memcg_params->root_cache; | 218 | return s->memcg_params.root_cache; |
229 | } | 219 | } |
230 | 220 | ||
231 | static __always_inline int memcg_charge_slab(struct kmem_cache *s, | 221 | static __always_inline int memcg_charge_slab(struct kmem_cache *s, |
@@ -235,7 +225,7 @@ static __always_inline int memcg_charge_slab(struct kmem_cache *s, | |||
235 | return 0; | 225 | return 0; |
236 | if (is_root_cache(s)) | 226 | if (is_root_cache(s)) |
237 | return 0; | 227 | return 0; |
238 | return memcg_charge_kmem(s->memcg_params->memcg, gfp, 1 << order); | 228 | return memcg_charge_kmem(s->memcg_params.memcg, gfp, 1 << order); |
239 | } | 229 | } |
240 | 230 | ||
241 | static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order) | 231 | static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order) |
@@ -244,9 +234,13 @@ static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order) | |||
244 | return; | 234 | return; |
245 | if (is_root_cache(s)) | 235 | if (is_root_cache(s)) |
246 | return; | 236 | return; |
247 | memcg_uncharge_kmem(s->memcg_params->memcg, 1 << order); | 237 | memcg_uncharge_kmem(s->memcg_params.memcg, 1 << order); |
248 | } | 238 | } |
249 | #else | 239 | |
240 | extern void slab_init_memcg_params(struct kmem_cache *); | ||
241 | |||
242 | #else /* !CONFIG_MEMCG_KMEM */ | ||
243 | |||
250 | static inline bool is_root_cache(struct kmem_cache *s) | 244 | static inline bool is_root_cache(struct kmem_cache *s) |
251 | { | 245 | { |
252 | return true; | 246 | return true; |
@@ -282,7 +276,11 @@ static inline int memcg_charge_slab(struct kmem_cache *s, gfp_t gfp, int order) | |||
282 | static inline void memcg_uncharge_slab(struct kmem_cache *s, int order) | 276 | static inline void memcg_uncharge_slab(struct kmem_cache *s, int order) |
283 | { | 277 | { |
284 | } | 278 | } |
285 | #endif | 279 | |
280 | static inline void slab_init_memcg_params(struct kmem_cache *s) | ||
281 | { | ||
282 | } | ||
283 | #endif /* CONFIG_MEMCG_KMEM */ | ||
286 | 284 | ||
287 | static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x) | 285 | static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x) |
288 | { | 286 | { |