Diffstat (limited to 'mm/slab.h')
-rw-r--r--  mm/slab.h | 67 +++++++++++++++++++++++++++++++++++++++++++++--------------------------
1 file changed, 41 insertions(+), 26 deletions(-)
diff --git a/mm/slab.h b/mm/slab.h
index 1cf4005482dd..4c3ac12dd644 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -86,8 +86,6 @@ extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
 extern void create_boot_cache(struct kmem_cache *, const char *name,
 			size_t size, unsigned long flags);
 
-struct mem_cgroup;
-
 int slab_unmergeable(struct kmem_cache *s);
 struct kmem_cache *find_mergeable(size_t size, size_t align,
 		unsigned long flags, const char *name, void (*ctor)(void *));
@@ -140,7 +138,7 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
 #define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
 
 int __kmem_cache_shutdown(struct kmem_cache *);
-int __kmem_cache_shrink(struct kmem_cache *);
+int __kmem_cache_shrink(struct kmem_cache *, bool);
 void slab_kmem_cache_release(struct kmem_cache *);
 
 struct seq_file;
@@ -165,16 +163,27 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 		       size_t count, loff_t *ppos);
 
 #ifdef CONFIG_MEMCG_KMEM
+/*
+ * Iterate over all memcg caches of the given root cache. The caller must hold
+ * slab_mutex.
+ */
+#define for_each_memcg_cache(iter, root) \
+	list_for_each_entry(iter, &(root)->memcg_params.list, \
+			    memcg_params.list)
+
+#define for_each_memcg_cache_safe(iter, tmp, root) \
+	list_for_each_entry_safe(iter, tmp, &(root)->memcg_params.list, \
+				 memcg_params.list)
+
 static inline bool is_root_cache(struct kmem_cache *s)
 {
-	return !s->memcg_params || s->memcg_params->is_root_cache;
+	return s->memcg_params.is_root_cache;
 }
 
 static inline bool slab_equal_or_root(struct kmem_cache *s,
 				      struct kmem_cache *p)
 {
-	return (p == s) ||
-	       (s->memcg_params && (p == s->memcg_params->root_cache));
+	return p == s || p == s->memcg_params.root_cache;
 }
 
 /*
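For illustration, the new iterator would be used like this (dump_memcg_caches() is a hypothetical caller; the slab_mutex requirement is the one stated in the macro's comment):

	/* Hypothetical: print every per-memcg child of a root cache.
	 * The list is only stable while slab_mutex is held. */
	static void dump_memcg_caches(struct kmem_cache *root)
	{
		struct kmem_cache *c;

		lockdep_assert_held(&slab_mutex);
		for_each_memcg_cache(c, root)
			pr_info("memcg cache: %s\n", c->name);
	}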
@@ -185,37 +194,30 @@ static inline bool slab_equal_or_root(struct kmem_cache *s,
 static inline const char *cache_name(struct kmem_cache *s)
 {
 	if (!is_root_cache(s))
-		return s->memcg_params->root_cache->name;
+		s = s->memcg_params.root_cache;
 	return s->name;
 }
 
 /*
  * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
- * That said the caller must assure the memcg's cache won't go away. Since once
- * created a memcg's cache is destroyed only along with the root cache, it is
- * true if we are going to allocate from the cache or hold a reference to the
- * root cache by other means. Otherwise, we should hold either the slab_mutex
- * or the memcg's slab_caches_mutex while calling this function and accessing
- * the returned value.
+ * That said the caller must assure the memcg's cache won't go away by either
+ * taking a css reference to the owner cgroup, or holding the slab_mutex.
  */
 static inline struct kmem_cache *
 cache_from_memcg_idx(struct kmem_cache *s, int idx)
 {
 	struct kmem_cache *cachep;
-	struct memcg_cache_params *params;
-
-	if (!s->memcg_params)
-		return NULL;
+	struct memcg_cache_array *arr;
 
 	rcu_read_lock();
-	params = rcu_dereference(s->memcg_params);
+	arr = rcu_dereference(s->memcg_params.memcg_caches);
 
 	/*
 	 * Make sure we will access the up-to-date value. The code updating
 	 * memcg_caches issues a write barrier to match this (see
-	 * memcg_register_cache()).
+	 * memcg_create_kmem_cache()).
 	 */
-	cachep = lockless_dereference(params->memcg_caches[idx]);
+	cachep = lockless_dereference(arr->entries[idx]);
 	rcu_read_unlock();
 
 	return cachep;
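A sketch of the reader side this accessor serves (memcg_cache_or_root() is invented for illustration; a NULL entry simply means the per-memcg clone has not been created yet):

	/* Hypothetical: resolve the cache to allocate from, falling back
	 * to the root cache when no per-memcg clone exists at idx. */
	static struct kmem_cache *memcg_cache_or_root(struct kmem_cache *root,
						      int idx)
	{
		struct kmem_cache *c;

		if (idx < 0)		/* no kmemcg id resolved */
			return root;
		c = cache_from_memcg_idx(root, idx);
		return c ? c : root;
	}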
@@ -225,7 +227,7 @@ static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
 {
 	if (is_root_cache(s))
 		return s;
-	return s->memcg_params->root_cache;
+	return s->memcg_params.root_cache;
 }
 
 static __always_inline int memcg_charge_slab(struct kmem_cache *s,
@@ -235,7 +237,7 @@ static __always_inline int memcg_charge_slab(struct kmem_cache *s,
 		return 0;
 	if (is_root_cache(s))
 		return 0;
-	return __memcg_charge_slab(s, gfp, order);
+	return memcg_charge_kmem(s->memcg_params.memcg, gfp, 1 << order);
 }
 
 static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
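The charge is now expressed in pages: an order-n slab page charges 1 << n pages to the owning memcg (e.g. order 2 charges 4 pages). A sketch of how the charge/uncharge pair brackets a slab page allocation (alloc_charged_slab_page() is illustrative only, not the allocators' actual path):

	/* Illustrative: charge before allocating the slab page, and
	 * give the charge back if the page allocation itself fails. */
	static struct page *alloc_charged_slab_page(struct kmem_cache *s,
						    gfp_t gfp, int order)
	{
		struct page *page;

		if (memcg_charge_slab(s, gfp, order))
			return NULL;
		page = alloc_pages(gfp, order);
		if (!page)
			memcg_uncharge_slab(s, order);
		return page;
	}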
@@ -244,9 +246,18 @@ static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
 		return;
 	if (is_root_cache(s))
 		return;
-	__memcg_uncharge_slab(s, order);
+	memcg_uncharge_kmem(s->memcg_params.memcg, 1 << order);
 }
-#else
+
+extern void slab_init_memcg_params(struct kmem_cache *);
+
+#else /* !CONFIG_MEMCG_KMEM */
+
+#define for_each_memcg_cache(iter, root) \
+	for ((void)(iter), (void)(root); 0; )
+#define for_each_memcg_cache_safe(iter, tmp, root) \
+	for ((void)(iter), (void)(tmp), (void)(root); 0; )
+
 static inline bool is_root_cache(struct kmem_cache *s)
 {
 	return true;
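The !CONFIG_MEMCG_KMEM stubs are degenerate for loops rather than empty macros: the (void) casts still evaluate every argument, so callers compile without unused-variable warnings, while the constant-false condition makes the body dead code the compiler drops. A hypothetical caller that builds under both configurations:

	/* Hypothetical: with CONFIG_MEMCG_KMEM=n the loop body is
	 * unreachable, n stays 0, and no unused warnings fire. */
	static inline int nr_memcg_caches(struct kmem_cache *root)
	{
		struct kmem_cache *c;
		int n = 0;

		for_each_memcg_cache(c, root)
			n++;
		return n;
	}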
@@ -282,7 +293,11 @@ static inline int memcg_charge_slab(struct kmem_cache *s, gfp_t gfp, int order)
 static inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
 {
 }
-#endif
+
+static inline void slab_init_memcg_params(struct kmem_cache *s)
+{
+}
+#endif /* CONFIG_MEMCG_KMEM */
 
 static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
 {