Diffstat (limited to 'include/linux/slab.h')
 include/linux/slab.h | 48 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 48 insertions(+), 0 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 743a10415122..5d168d7e0a28 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -11,6 +11,8 @@
 
 #include <linux/gfp.h>
 #include <linux/types.h>
+#include <linux/workqueue.h>
+
 
 /*
  * Flags to pass to kmem_cache_create().
@@ -116,6 +118,7 @@ struct kmem_cache {
 };
 #endif
 
+struct mem_cgroup;
 /*
  * struct kmem_cache related prototypes
  */
@@ -125,6 +128,9 @@ int slab_is_available(void);
 struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
			unsigned long,
			void (*)(void *));
+struct kmem_cache *
+kmem_cache_create_memcg(struct mem_cgroup *, const char *, size_t, size_t,
+			unsigned long, void (*)(void *), struct kmem_cache *);
 void kmem_cache_destroy(struct kmem_cache *);
 int kmem_cache_shrink(struct kmem_cache *);
 void kmem_cache_free(struct kmem_cache *, void *);
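
The new entry point mirrors kmem_cache_create() but additionally takes the owning memcg and the root cache the replica derives from. As a rough illustration of the shape of a call (the object type, cache name, and flags below are hypothetical, not taken from this patch):

/*
 * Illustrative sketch only: create a per-memcg replica of a root cache.
 * "struct my_object" and the cache name are hypothetical; the size,
 * alignment, flags, and constructor simply mirror what the root cache
 * was created with, per the prototype above.
 */
static struct kmem_cache *create_replica(struct mem_cgroup *memcg,
					 struct kmem_cache *root)
{
	return kmem_cache_create_memcg(memcg, "my_object-memcg",
				       sizeof(struct my_object),
				       __alignof__(struct my_object),
				       SLAB_HWCACHE_ALIGN,
				       NULL /* no constructor */, root);
}
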
@@ -175,6 +181,48 @@ void kmem_cache_free(struct kmem_cache *, void *);
 #ifndef ARCH_SLAB_MINALIGN
 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
 #endif
+/*
+ * This is the main placeholder for memcg-related information in kmem caches.
+ * struct kmem_cache will hold a pointer to it, so the memory cost while
+ * disabled is a single pointer. The runtime cost while enabled is higher
+ * than it would be if this were bundled into kmem_cache: we need an
+ * extra pointer chase. But the trade-off clearly lies in favor of not
+ * penalizing non-users.
+ *
+ * Both the root cache and the child caches will have it. For the root cache,
+ * this will hold a dynamically allocated array large enough to hold
+ * information about the currently limited memcgs in the system.
+ *
+ * Child caches will hold extra metadata needed for their operation. Fields:
+ *
+ * @memcg: pointer to the memcg this cache belongs to
+ * @list: list_head for the list of all caches in this memcg
+ * @root_cache: pointer to the global, root cache this cache was derived from
+ * @dead: set to true after the memcg dies; the cache may still be around
+ * @nr_pages: number of pages that belong to this cache
+ * @destroy: worker to be called whenever we are ready, or believe we may be
+ *           ready, to destroy this cache
+ */
+struct memcg_cache_params {
+	bool is_root_cache;
+	union {
+		struct kmem_cache *memcg_caches[0];
+		struct {
+			struct mem_cgroup *memcg;
+			struct list_head list;
+			struct kmem_cache *root_cache;
+			bool dead;
+			atomic_t nr_pages;
+			struct work_struct destroy;
+		};
+	};
+};
+
+int memcg_update_all_caches(int num_memcgs);
+
+struct seq_file;
+int cache_show(struct kmem_cache *s, struct seq_file *m);
+void print_slabinfo_header(struct seq_file *m);
 
 /*
  * Common kmalloc functions provided by all allocators
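
The union in memcg_cache_params is discriminated by is_root_cache: a root cache uses the payload as a flexible array of per-memcg replica pointers (which memcg_update_all_caches() presumably resizes as memcgs come and go), while a child cache uses the metadata fields. A minimal sketch of how lookup code might branch on the flag; the ->memcg_params field name and the memcg index parameter are assumptions for illustration, not declared in this hunk:

/*
 * Illustrative sketch only. Assumes struct kmem_cache carries a
 * ->memcg_params pointer (the comment block above says it holds one)
 * and that the caller already resolved the memcg to an array index.
 */
static struct kmem_cache *cache_for_memcg(struct kmem_cache *s, int idx)
{
	struct memcg_cache_params *p = s->memcg_params;

	if (!p)			/* kmem accounting not enabled */
		return s;
	if (p->is_root_cache) {
		/* Root cache: pick the per-memcg replica, if one exists. */
		struct kmem_cache *child = p->memcg_caches[idx];
		return child ? child : s;
	}
	/* Child cache: already bound to a memcg via p->memcg. */
	return s;
}

Keeping the root-only array and the child-only metadata in one union keeps the struct small while letting both kinds of cache share the same header field.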