Diffstat (limited to 'include/linux/slab.h')
-rw-r--r--	include/linux/slab.h	60
1 file changed, 37 insertions, 23 deletions
diff --git a/include/linux/slab.h b/include/linux/slab.h
index af5aa65c7c18..0c4c579f52ed 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -21,13 +21,20 @@
  * Flags to pass to kmem_cache_create().
  * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
  */
-#define SLAB_CONSISTENCY_CHECKS	0x00000100UL	/* DEBUG: Perform (expensive) checks on alloc/free */
-#define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
-#define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
-#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
-#define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
-#define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
-#define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
+/* DEBUG: Perform (expensive) checks on alloc/free */
+#define SLAB_CONSISTENCY_CHECKS	((slab_flags_t __force)0x00000100UL)
+/* DEBUG: Red zone objs in a cache */
+#define SLAB_RED_ZONE		((slab_flags_t __force)0x00000400UL)
+/* DEBUG: Poison objects */
+#define SLAB_POISON		((slab_flags_t __force)0x00000800UL)
+/* Align objs on cache lines */
+#define SLAB_HWCACHE_ALIGN	((slab_flags_t __force)0x00002000UL)
+/* Use GFP_DMA memory */
+#define SLAB_CACHE_DMA		((slab_flags_t __force)0x00004000UL)
+/* DEBUG: Store the last owner for bug hunting */
+#define SLAB_STORE_USER		((slab_flags_t __force)0x00010000UL)
+/* Panic if kmem_cache_create() fails */
+#define SLAB_PANIC		((slab_flags_t __force)0x00040000UL)
 /*
  * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
  *
@@ -65,44 +72,51 @@
  *
  * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
  */
-#define SLAB_TYPESAFE_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
-#define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
-#define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */
+/* Defer freeing slabs to RCU */
+#define SLAB_TYPESAFE_BY_RCU	((slab_flags_t __force)0x00080000UL)
+/* Spread some memory over cpuset */
+#define SLAB_MEM_SPREAD		((slab_flags_t __force)0x00100000UL)
+/* Trace allocations and frees */
+#define SLAB_TRACE		((slab_flags_t __force)0x00200000UL)
 
 /* Flag to prevent checks on free */
 #ifdef CONFIG_DEBUG_OBJECTS
-# define SLAB_DEBUG_OBJECTS	0x00400000UL
+# define SLAB_DEBUG_OBJECTS	((slab_flags_t __force)0x00400000UL)
 #else
-# define SLAB_DEBUG_OBJECTS	0x00000000UL
+# define SLAB_DEBUG_OBJECTS	((slab_flags_t __force)0x00000000UL)
 #endif
 
-#define SLAB_NOLEAKTRACE	0x00800000UL	/* Avoid kmemleak tracing */
+/* Avoid kmemleak tracing */
+#define SLAB_NOLEAKTRACE	((slab_flags_t __force)0x00800000UL)
 
 /* Don't track use of uninitialized memory */
 #ifdef CONFIG_KMEMCHECK
-# define SLAB_NOTRACK		0x01000000UL
+# define SLAB_NOTRACK		((slab_flags_t __force)0x01000000UL)
 #else
-# define SLAB_NOTRACK		0x00000000UL
+# define SLAB_NOTRACK		((slab_flags_t __force)0x00000000UL)
 #endif
+/* Fault injection mark */
 #ifdef CONFIG_FAILSLAB
-# define SLAB_FAILSLAB		0x02000000UL	/* Fault injection mark */
+# define SLAB_FAILSLAB		((slab_flags_t __force)0x02000000UL)
 #else
-# define SLAB_FAILSLAB		0x00000000UL
+# define SLAB_FAILSLAB		((slab_flags_t __force)0x00000000UL)
 #endif
+/* Account to memcg */
 #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
-# define SLAB_ACCOUNT		0x04000000UL	/* Account to memcg */
+# define SLAB_ACCOUNT		((slab_flags_t __force)0x04000000UL)
 #else
-# define SLAB_ACCOUNT		0x00000000UL
+# define SLAB_ACCOUNT		((slab_flags_t __force)0x00000000UL)
 #endif
 
 #ifdef CONFIG_KASAN
-#define SLAB_KASAN		0x08000000UL
+#define SLAB_KASAN		((slab_flags_t __force)0x08000000UL)
 #else
-#define SLAB_KASAN		0x00000000UL
+#define SLAB_KASAN		((slab_flags_t __force)0x00000000UL)
 #endif
 
 /* The following flags affect the page allocator grouping pages by mobility */
-#define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* Objects are reclaimable */
+/* Objects are reclaimable */
+#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000UL)
 #define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
 /*
  * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
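
For reference, a minimal sketch of the typing model the casts above rely on. The slab_flags_t typedef itself is not part of this hunk (it is added elsewhere in the same series, in include/linux/types.h), and __bitwise/__force come from the kernel's compiler headers; the snippet below only illustrates the idea and is not taken from this patch.

/*
 * Sketch only: under sparse (__CHECKER__), __bitwise makes slab_flags_t
 * a distinct type, so passing a bare integer or a gfp_t where a
 * slab_flags_t is expected produces a warning.  __force suppresses that
 * warning at the one place the conversion is intentional, namely the
 * flag definitions above.  Without sparse, both annotations expand to
 * nothing and slab_flags_t is an ordinary unsigned int.
 */
#ifdef __CHECKER__
# define __bitwise	__attribute__((bitwise))
# define __force	__attribute__((force))
#else
# define __bitwise
# define __force
#endif

typedef unsigned int __bitwise slab_flags_t;
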
@@ -128,7 +142,7 @@ void __init kmem_cache_init(void);
 bool slab_is_available(void);
 
 struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
-			unsigned long,
+			slab_flags_t,
 			void (*)(void *));
 void kmem_cache_destroy(struct kmem_cache *);
 int kmem_cache_shrink(struct kmem_cache *);
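
Call sites are unaffected by the prototype change above: named SLAB_* flags still combine with the | operator, and only code that passed raw numeric flag values (or a gfp_t by mistake) now trips a sparse warning. A hypothetical caller, assuming an illustrative struct foo and foo_cache that are not part of this patch:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

struct foo {
	unsigned long state;
};

static struct kmem_cache *foo_cache;

static int __init foo_init(void)
{
	/* The flags argument is now slab_flags_t; the call looks the same. */
	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
				      SLAB_HWCACHE_ALIGN | SLAB_RECLAIM_ACCOUNT,
				      NULL);
	if (!foo_cache)
		return -ENOMEM;
	return 0;
}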