diff options
Diffstat (limited to 'include/linux/slab.h')
| -rw-r--r-- | include/linux/slab.h | 57 |
1 file changed, 56 insertions(+), 1 deletion(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h index 83d1a1454b7e..5d168d7e0a28 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
| @@ -11,6 +11,8 @@ | |||
| 11 | 11 | ||
| 12 | #include <linux/gfp.h> | 12 | #include <linux/gfp.h> |
| 13 | #include <linux/types.h> | 13 | #include <linux/types.h> |
| 14 | #include <linux/workqueue.h> | ||
| 15 | |||
| 14 | 16 | ||
| 15 | /* | 17 | /* |
| 16 | * Flags to pass to kmem_cache_create(). | 18 | * Flags to pass to kmem_cache_create(). |
| @@ -116,6 +118,7 @@ struct kmem_cache { | |||
| 116 | }; | 118 | }; |
| 117 | #endif | 119 | #endif |
| 118 | 120 | ||
| 121 | struct mem_cgroup; | ||
| 119 | /* | 122 | /* |
| 120 | * struct kmem_cache related prototypes | 123 | * struct kmem_cache related prototypes |
| 121 | */ | 124 | */ |
| @@ -125,10 +128,12 @@ int slab_is_available(void); | |||
| 125 | struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, | 128 | struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, |
| 126 | unsigned long, | 129 | unsigned long, |
| 127 | void (*)(void *)); | 130 | void (*)(void *)); |
| 131 | struct kmem_cache * | ||
| 132 | kmem_cache_create_memcg(struct mem_cgroup *, const char *, size_t, size_t, | ||
| 133 | unsigned long, void (*)(void *), struct kmem_cache *); | ||
| 128 | void kmem_cache_destroy(struct kmem_cache *); | 134 | void kmem_cache_destroy(struct kmem_cache *); |
| 129 | int kmem_cache_shrink(struct kmem_cache *); | 135 | int kmem_cache_shrink(struct kmem_cache *); |
| 130 | void kmem_cache_free(struct kmem_cache *, void *); | 136 | void kmem_cache_free(struct kmem_cache *, void *); |
| 131 | unsigned int kmem_cache_size(struct kmem_cache *); | ||
| 132 | 137 | ||
| 133 | /* | 138 | /* |
| 134 | * Please use this macro to create slab caches. Simply specify the | 139 | * Please use this macro to create slab caches. Simply specify the |
| @@ -176,6 +181,48 @@ unsigned int kmem_cache_size(struct kmem_cache *); | |||
| 176 | #ifndef ARCH_SLAB_MINALIGN | 181 | #ifndef ARCH_SLAB_MINALIGN |
| 177 | #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) | 182 | #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) |
| 178 | #endif | 183 | #endif |
| 184 | /* | ||
| 185 | * This is the main placeholder for memcg-related information in kmem caches. | ||
| 186 | * struct kmem_cache will hold a pointer to it, so the memory cost while | ||
| 187 | * disabled is 1 pointer. The runtime cost while enabled, gets bigger than it | ||
| 188 | * would otherwise be if that would be bundled in kmem_cache: we'll need an | ||
| 189 | * extra pointer chase. But the trade-off clearly lies in favor of not | ||
| 190 | * penalizing non-users. | ||
| 191 | * | ||
| 192 | * Both the root cache and the child caches will have it. For the root cache, | ||
| 193 | * this will hold a dynamically allocated array large enough to hold | ||
| 194 | * information about the currently limited memcgs in the system. | ||
| 195 | * | ||
| 196 | * Child caches will hold extra metadata needed for their operation. Fields are: | ||
| 197 | * | ||
| 198 | * @memcg: pointer to the memcg this cache belongs to | ||
| 199 | * @list: list_head for the list of all caches in this memcg | ||
| 200 | * @root_cache: pointer to the global, root cache, this cache was derived from | ||
| 201 | * @dead: set to true after the memcg dies; the cache may still be around. | ||
| 202 | * @nr_pages: number of pages that belong to this cache. | ||
| 203 | * @destroy: worker to be called whenever we are ready, or believe we may be | ||
| 204 | * ready, to destroy this cache. | ||
| 205 | */ | ||
| 206 | struct memcg_cache_params { | ||
| 207 | bool is_root_cache; | ||
| 208 | union { | ||
| 209 | struct kmem_cache *memcg_caches[0]; | ||
| 210 | struct { | ||
| 211 | struct mem_cgroup *memcg; | ||
| 212 | struct list_head list; | ||
| 213 | struct kmem_cache *root_cache; | ||
| 214 | bool dead; | ||
| 215 | atomic_t nr_pages; | ||
| 216 | struct work_struct destroy; | ||
| 217 | }; | ||
| 218 | }; | ||
| 219 | }; | ||
| 220 | |||
| 221 | int memcg_update_all_caches(int num_memcgs); | ||
| 222 | |||
| 223 | struct seq_file; | ||
| 224 | int cache_show(struct kmem_cache *s, struct seq_file *m); | ||
| 225 | void print_slabinfo_header(struct seq_file *m); | ||
| 179 | 226 | ||
| 180 | /* | 227 | /* |
| 181 | * Common kmalloc functions provided by all allocators | 228 | * Common kmalloc functions provided by all allocators |
| @@ -388,6 +435,14 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node) | |||
| 388 | return kmalloc_node(size, flags | __GFP_ZERO, node); | 435 | return kmalloc_node(size, flags | __GFP_ZERO, node); |
| 389 | } | 436 | } |
| 390 | 437 | ||
| 438 | /* | ||
| 439 | * Determine the size of a slab object | ||
| 440 | */ | ||
| 441 | static inline unsigned int kmem_cache_size(struct kmem_cache *s) | ||
| 442 | { | ||
| 443 | return s->object_size; | ||
| 444 | } | ||
| 445 | |||
| 391 | void __init kmem_cache_init_late(void); | 446 | void __init kmem_cache_init_late(void); |
| 392 | 447 | ||
| 393 | #endif /* _LINUX_SLAB_H */ | 448 | #endif /* _LINUX_SLAB_H */ |
