path: root/include/linux/slab.h
Diffstat (limited to 'include/linux/slab.h')
-rw-r--r--  include/linux/slab.h | 81
1 file changed, 51 insertions(+), 30 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index af5aa65c7c18..50697a1d6621 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -21,13 +21,20 @@
  * Flags to pass to kmem_cache_create().
  * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
  */
-#define SLAB_CONSISTENCY_CHECKS	0x00000100UL	/* DEBUG: Perform (expensive) checks on alloc/free */
-#define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
-#define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
-#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
-#define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
-#define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
-#define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
+/* DEBUG: Perform (expensive) checks on alloc/free */
+#define SLAB_CONSISTENCY_CHECKS	((slab_flags_t __force)0x00000100U)
+/* DEBUG: Red zone objs in a cache */
+#define SLAB_RED_ZONE		((slab_flags_t __force)0x00000400U)
+/* DEBUG: Poison objects */
+#define SLAB_POISON		((slab_flags_t __force)0x00000800U)
+/* Align objs on cache lines */
+#define SLAB_HWCACHE_ALIGN	((slab_flags_t __force)0x00002000U)
+/* Use GFP_DMA memory */
+#define SLAB_CACHE_DMA		((slab_flags_t __force)0x00004000U)
+/* DEBUG: Store the last owner for bug hunting */
+#define SLAB_STORE_USER		((slab_flags_t __force)0x00010000U)
+/* Panic if kmem_cache_create() fails */
+#define SLAB_PANIC		((slab_flags_t __force)0x00040000U)
 /*
  * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
  *
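
The churn from 0x...UL to ((slab_flags_t __force)0x...U) exists because this series makes the flags a dedicated sparse bitwise type instead of a plain unsigned long, so the checker can catch integers leaking into flag arguments. A minimal sketch of the mechanism, assuming the typedef the series adds to include/linux/types.h; SLAB_EXAMPLE_FLAG is illustrative, not a real flag:

/* Sketch: how __bitwise/__force behave under sparse ("make C=1").
 * With a plain compiler the attributes expand to nothing, so the
 * generated code is unchanged; only sparse sees the stronger type. */
#ifdef __CHECKER__
#define __bitwise	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise
#define __force
#endif

/* The typedef this series introduces in include/linux/types.h: */
typedef unsigned __bitwise slab_flags_t;

/* Each flag must now carry one explicit __force cast at its
 * definition site; callers then combine flags type-safely with |. */
#define SLAB_EXAMPLE_FLAG	((slab_flags_t __force)0x00000100U)
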
@@ -65,44 +72,45 @@
  *
  * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
  */
-#define SLAB_TYPESAFE_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
-#define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
-#define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */
+/* Defer freeing slabs to RCU */
+#define SLAB_TYPESAFE_BY_RCU	((slab_flags_t __force)0x00080000U)
+/* Spread some memory over cpuset */
+#define SLAB_MEM_SPREAD		((slab_flags_t __force)0x00100000U)
+/* Trace allocations and frees */
+#define SLAB_TRACE		((slab_flags_t __force)0x00200000U)
 
 /* Flag to prevent checks on free */
 #ifdef CONFIG_DEBUG_OBJECTS
-# define SLAB_DEBUG_OBJECTS	0x00400000UL
+# define SLAB_DEBUG_OBJECTS	((slab_flags_t __force)0x00400000U)
 #else
-# define SLAB_DEBUG_OBJECTS	0x00000000UL
+# define SLAB_DEBUG_OBJECTS	0
 #endif
 
-#define SLAB_NOLEAKTRACE	0x00800000UL	/* Avoid kmemleak tracing */
+/* Avoid kmemleak tracing */
+#define SLAB_NOLEAKTRACE	((slab_flags_t __force)0x00800000U)
 
-/* Don't track use of uninitialized memory */
-#ifdef CONFIG_KMEMCHECK
-# define SLAB_NOTRACK		0x01000000UL
-#else
-# define SLAB_NOTRACK		0x00000000UL
-#endif
+/* Fault injection mark */
 #ifdef CONFIG_FAILSLAB
-# define SLAB_FAILSLAB		0x02000000UL	/* Fault injection mark */
+# define SLAB_FAILSLAB		((slab_flags_t __force)0x02000000U)
 #else
-# define SLAB_FAILSLAB		0x00000000UL
+# define SLAB_FAILSLAB		0
 #endif
+/* Account to memcg */
 #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
-# define SLAB_ACCOUNT		0x04000000UL	/* Account to memcg */
+# define SLAB_ACCOUNT		((slab_flags_t __force)0x04000000U)
 #else
-# define SLAB_ACCOUNT		0x00000000UL
+# define SLAB_ACCOUNT		0
 #endif
 
 #ifdef CONFIG_KASAN
-#define SLAB_KASAN		0x08000000UL
+#define SLAB_KASAN		((slab_flags_t __force)0x08000000U)
 #else
-#define SLAB_KASAN		0x00000000UL
+#define SLAB_KASAN		0
 #endif
 
 /* The following flags affect the page allocator grouping pages by mobility */
-#define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* Objects are reclaimable */
+/* Objects are reclaimable */
+#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000U)
 #define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
 /*
  * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
@@ -128,7 +136,7 @@ void __init kmem_cache_init(void);
 bool slab_is_available(void);
 
 struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
-			unsigned long,
+			slab_flags_t,
			void (*)(void *));
 void kmem_cache_destroy(struct kmem_cache *);
 int kmem_cache_shrink(struct kmem_cache *);
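
For in-tree callers this signature change is source-compatible as long as they pass the SLAB_* macros; only code that stored flags in a bare integer or unsigned long needs updating. A usage sketch of the new prototype; the "foo" names are illustrative, not from this patch:

/* Illustrative cache setup against the new slab_flags_t prototype. */
struct foo {
	int a;
};

static struct kmem_cache *foo_cachep;

static int __init foo_init(void)
{
	/* The fourth argument is now slab_flags_t, not unsigned long;
	 * OR-ing SLAB_* macros stays well-typed under sparse. */
	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	return foo_cachep ? 0 : -ENOMEM;
}
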
@@ -459,9 +467,6 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
  * Also it is possible to set different flags by OR'ing
  * in one or more of the following additional @flags:
  *
- * %__GFP_COLD - Request cache-cold pages instead of
- *   trying to return cache-warm pages.
- *
  * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
  *
  * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
@@ -636,6 +641,22 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
 #define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)
 
+static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
+				       int node)
+{
+	if (size != 0 && n > SIZE_MAX / size)
+		return NULL;
+	if (__builtin_constant_p(n) && __builtin_constant_p(size))
+		return kmalloc_node(n * size, flags, node);
+	return __kmalloc_node(n * size, flags, node);
+}
+
+static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
+{
+	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
+}
+
+
 #ifdef CONFIG_NUMA
 extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
 #define kmalloc_node_track_caller(size, flags, node) \
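
kmalloc_array_node() and kcalloc_node() round out the kmalloc_array()/kcalloc() family with NUMA-node-aware variants: the SIZE_MAX / size guard makes an overflowing n * size request return NULL instead of silently wrapping into a too-small allocation, and kcalloc_node() additionally zeroes the memory via __GFP_ZERO. A caller sketch; struct item, nr and dev are illustrative, not from this patch:

/* Illustrative caller: allocate a zeroed array near a device's node. */
struct item {
	u64 key;
	void *payload;
};

static struct item *alloc_items(size_t nr, struct device *dev)
{
	/* Returns NULL on allocation failure *or* when
	 * nr * sizeof(*items) would overflow SIZE_MAX. */
	return kcalloc_node(nr, sizeof(struct item), GFP_KERNEL,
			    dev_to_node(dev));
}
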