 include/linux/slab.h | 45 +++++++++++++++++++++++---------------------
 1 file changed, 25 insertions(+), 20 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 9a139b637069..76f1feeabd38 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -104,6 +104,7 @@
 				(unsigned long)ZERO_SIZE_PTR)
 
 #include <linux/kmemleak.h>
+#include <linux/kasan.h>
 
 struct mem_cgroup;
 /*
@@ -115,14 +116,12 @@ int slab_is_available(void);
 struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
 			unsigned long,
 			void (*)(void *));
-#ifdef CONFIG_MEMCG_KMEM
-struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *,
-					   struct kmem_cache *,
-					   const char *);
-#endif
 void kmem_cache_destroy(struct kmem_cache *);
 int kmem_cache_shrink(struct kmem_cache *);
-void kmem_cache_free(struct kmem_cache *, void *);
+
+void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
+void memcg_deactivate_kmem_caches(struct mem_cgroup *);
+void memcg_destroy_kmem_caches(struct mem_cgroup *);
 
 /*
  * Please use this macro to create slab caches. Simply specify the
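For readers unfamiliar with the macro the comment above refers to: KMEM_CACHE() wraps kmem_cache_create() and derives the cache name, object size and alignment from a struct type. A minimal usage sketch (the struct foo type and foo_cachep variable are illustrative, not part of this patch):

struct foo {
	int bar;
};

static struct kmem_cache *foo_cachep;

static int __init foo_init(void)
{
	/* Name, size and alignment are all taken from struct foo. */
	foo_cachep = KMEM_CACHE(foo, 0);
	return foo_cachep ? 0 : -ENOMEM;
}

static void __exit foo_exit(void)
{
	kmem_cache_destroy(foo_cachep);
}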
@@ -289,6 +288,7 @@ static __always_inline int kmalloc_index(size_t size)
 
 void *__kmalloc(size_t size, gfp_t flags);
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
+void kmem_cache_free(struct kmem_cache *, void *);
 
 #ifdef CONFIG_NUMA
 void *__kmalloc_node(size_t size, gfp_t flags, int node);
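With kmem_cache_free() now declared next to kmem_cache_alloc(), the pair reads together at the declaration site as it does at runtime. A sketch of the usual round trip (reusing the hypothetical foo_cachep from the example above):

	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);

	if (!f)
		return -ENOMEM;
	/* ... use f ... */
	kmem_cache_free(foo_cachep, f);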
@@ -326,7 +326,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
 static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
 		gfp_t flags, size_t size)
 {
-	return kmem_cache_alloc(s, flags);
+	void *ret = kmem_cache_alloc(s, flags);
+
+	kasan_kmalloc(s, ret, size);
+	return ret;
 }
 
 static __always_inline void *
@@ -334,7 +337,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
 		gfp_t gfpflags,
 		int node, size_t size)
 {
-	return kmem_cache_alloc_node(s, gfpflags, node);
+	void *ret = kmem_cache_alloc_node(s, gfpflags, node);
+
+	kasan_kmalloc(s, ret, size);
+	return ret;
 }
 #endif /* CONFIG_TRACING */
 
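Both tracing wrappers now report the caller's originally requested size to KASAN, so the unused tail of the slab object can stay poisoned and out-of-bounds accesses within the object are caught. With CONFIG_KASAN=n this costs nothing, thanks to the usual config-stub pattern; a sketch of what <linux/kasan.h> is expected to provide (shown here for illustration only, it is not part of this file):

#ifdef CONFIG_KASAN
void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size);
#else
static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
				 size_t size) {}
#endif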
@@ -474,14 +480,14 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 #ifndef ARCH_SLAB_MINALIGN
 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
 #endif
+
+struct memcg_cache_array {
+	struct rcu_head rcu;
+	struct kmem_cache *entries[0];
+};
+
 /*
  * This is the main placeholder for memcg-related information in kmem caches.
- * struct kmem_cache will hold a pointer to it, so the memory cost while
- * disabled is 1 pointer. The runtime cost while enabled, gets bigger than it
- * would otherwise be if that would be bundled in kmem_cache: we'll need an
- * extra pointer chase. But the trade off clearly lays in favor of not
- * penalizing non-users.
- *
  * Both the root cache and the child caches will have it. For the root cache,
  * this will hold a dynamically allocated array large enough to hold
  * information about the currently limited memcgs in the system. To allow the
@@ -491,19 +497,18 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
  * Child caches will hold extra metadata needed for its operation. Fields are:
  *
  * @memcg: pointer to the memcg this cache belongs to
- * @list: list_head for the list of all caches in this memcg
  * @root_cache: pointer to the global, root cache, this cache was derived from
+ *
+ * Both root and child caches of the same kind are linked into a list chained
+ * through @list.
  */
 struct memcg_cache_params {
 	bool is_root_cache;
+	struct list_head list;
 	union {
-		struct {
-			struct rcu_head rcu_head;
-			struct kmem_cache *memcg_caches[0];
-		};
+		struct memcg_cache_array __rcu *memcg_caches;
 		struct {
 			struct mem_cgroup *memcg;
-			struct list_head list;
 			struct kmem_cache *root_cache;
 		};
 	};
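The __rcu annotation on memcg_caches means a root cache's array can be reallocated (grown) while readers walk it, and the rcu head in struct memcg_cache_array lets the old array be freed only after a grace period. A minimal sketch of the reader side, assuming memcg_params is reachable from struct kmem_cache and the index has already been validated (the function name is hypothetical):

static struct kmem_cache *lookup_memcg_cache(struct kmem_cache *root, int idx)
{
	struct memcg_cache_array *arr;
	struct kmem_cache *cachep;

	/* Pin the current array; it may be replaced concurrently. */
	rcu_read_lock();
	arr = rcu_dereference(root->memcg_params.memcg_caches);
	cachep = arr->entries[idx];	/* NULL if no cache for this memcg yet */
	rcu_read_unlock();

	return cachep;
}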