Diffstat (limited to 'include/linux/slab.h'):

 include/linux/slab.h | 36 +++++++++++++++++++++++++++---------
 1 file changed, 27 insertions(+), 9 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 1ef822e31c77..71829efc40ba 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -21,28 +21,25 @@ typedef struct kmem_cache kmem_cache_t __deprecated;
  * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
  */
 #define SLAB_DEBUG_FREE		0x00000100UL	/* DEBUG: Perform (expensive) checks on free */
-#define SLAB_DEBUG_INITIAL	0x00000200UL	/* DEBUG: Call constructor (as verifier) */
 #define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
 #define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
 #define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
 #define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
-#define SLAB_MUST_HWCACHE_ALIGN	0x00008000UL	/* Force alignment even if debuggin is active */
 #define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
 #define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* Objects are reclaimable */
 #define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
 #define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
 #define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
+#define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */
 
 /* Flags passed to a constructor functions */
 #define SLAB_CTOR_CONSTRUCTOR	0x001UL		/* If not set, then deconstructor */
-#define SLAB_CTOR_ATOMIC	0x002UL		/* Tell constructor it can't sleep */
-#define SLAB_CTOR_VERIFY	0x004UL		/* Tell constructor it's a verify call */
 
 /*
  * struct kmem_cache related prototypes
  */
 void __init kmem_cache_init(void);
-extern int slab_is_available(void);
+int slab_is_available(void);
 
 struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
 			unsigned long,
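The flags above are OR-ed together by callers of kmem_cache_create(). A minimal sketch of typical usage, assuming a hypothetical struct foo (note that kmem_cache_create() still takes constructor and destructor arguments at this point in the series):

	#include <linux/init.h>
	#include <linux/slab.h>

	struct foo {
		int bar;
	};

	static struct kmem_cache *foo_cachep;

	static int __init foo_init(void)
	{
		/* Hardware-cacheline-aligned objects; SLAB_PANIC means the
		 * caller needs no NULL check, creation failure panics. */
		foo_cachep = kmem_cache_create("foo", sizeof(struct foo),
					       0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
					       NULL, NULL);
		return 0;
	}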
@@ -57,6 +54,18 @@ unsigned int kmem_cache_size(struct kmem_cache *);
 const char *kmem_cache_name(struct kmem_cache *);
 int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);
 
+/*
+ * Please use this macro to create slab caches. Simply specify the
+ * name of the structure and maybe some flags that are listed above.
+ *
+ * The alignment of the struct determines object alignment. If you
+ * f.e. add ____cacheline_aligned_in_smp to the struct declaration
+ * then the objects will be properly aligned in SMP configurations.
+ */
+#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
+		sizeof(struct __struct), __alignof__(struct __struct),\
+		(__flags), NULL, NULL)
+
 #ifdef CONFIG_NUMA
 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 #else
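The new KMEM_CACHE() macro packages exactly the pattern sketched earlier. A short usage sketch, again with a hypothetical struct foo, showing how the struct's own declared alignment flows through __alignof__():

	#include <linux/init.h>
	#include <linux/slab.h>

	struct foo {
		int bar;
	} ____cacheline_aligned_in_smp;	/* drives object alignment */

	static struct kmem_cache *foo_cachep;

	static int __init foo_init(void)
	{
		/* Expands to kmem_cache_create("foo", sizeof(struct foo),
		 * __alignof__(struct foo), SLAB_PANIC, NULL, NULL). */
		foo_cachep = KMEM_CACHE(foo, SLAB_PANIC);
		return 0;
	}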
@@ -72,8 +81,9 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
  */
 void *__kmalloc(size_t, gfp_t);
 void *__kzalloc(size_t, gfp_t);
+void * __must_check krealloc(const void *, size_t, gfp_t);
 void kfree(const void *);
-unsigned int ksize(const void *);
+size_t ksize(const void *);
 
 /**
  * kcalloc - allocate memory for an array. The memory is set to zero.
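krealloc() is new in this patch and is marked __must_check for the same reason userspace realloc() callers must not overwrite their only pointer: on failure the old buffer is left intact and still owned by the caller. A sketch of the intended calling pattern (grow_buffer() is an illustrative name, not part of the patch):

	#include <linux/errno.h>
	#include <linux/slab.h>

	static int grow_buffer(char **buf, size_t new_len)
	{
		char *tmp = krealloc(*buf, new_len, GFP_KERNEL);

		if (!tmp)
			return -ENOMEM;	/* *buf untouched, must still be kfree()d */
		*buf = tmp;
		return 0;
	}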
@@ -94,9 +104,14 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
  * the appropriate general cache at compile time.
  */
 
-#ifdef CONFIG_SLAB
+#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB)
+#ifdef CONFIG_SLUB
+#include <linux/slub_def.h>
+#else
 #include <linux/slab_def.h>
+#endif /* !CONFIG_SLUB */
 #else
+
 /*
  * Fallback definitions for an allocator not wanting to provide
  * its own optimized kmalloc definitions (like SLOB).
@@ -183,7 +198,7 @@ static inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
  * allocator where we care about the real place the memory allocation
  * request comes from.
  */
-#ifdef CONFIG_DEBUG_SLAB
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
 extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
 #define kmalloc_track_caller(size, flags) \
 	__kmalloc_track_caller(size, flags, __builtin_return_address(0))
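The widened condition reflects that SLUB, like CONFIG_DEBUG_SLAB, can attribute allocations to their real call site. The macro exists for wrappers around kmalloc(); a hypothetical wrapper showing why (my_zalloc() is illustrative only):

	#include <linux/slab.h>
	#include <linux/string.h>

	/* Without _track_caller, slab debugging would record my_zalloc()
	 * itself as the allocation site for every one of its callers. */
	static void *my_zalloc(size_t size, gfp_t flags)
	{
		void *p = kmalloc_track_caller(size, flags);

		if (p)
			memset(p, 0, size);
		return p;
	}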
@@ -201,7 +216,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
  * standard allocator where we care about the real place the memory
  * allocation request comes from.
  */
-#ifdef CONFIG_DEBUG_SLAB
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
 extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
 #define kmalloc_node_track_caller(size, flags, node) \
 	__kmalloc_node_track_caller(size, flags, node, \
@@ -218,6 +233,9 @@ extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
 
 #endif /* DEBUG_SLAB */
 
+extern const struct seq_operations slabinfo_op;
+ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *);
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SLAB_H */
 
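Declaring slabinfo_op and slabinfo_write() here lets either allocator back /proc/slabinfo through the generic seq_file machinery. A sketch of how a seq_operations table like this is conventionally wired into a proc file; slabinfo_open and proc_slabinfo_ops are illustrative names, not part of this patch:

	#include <linux/fs.h>
	#include <linux/seq_file.h>
	#include <linux/slab.h>

	/* seq_open() attaches the allocator's iterator to the file;
	 * slabinfo_write() handles writes that tune cache parameters. */
	static int slabinfo_open(struct inode *inode, struct file *file)
	{
		return seq_open(file, &slabinfo_op);
	}

	static const struct file_operations proc_slabinfo_ops = {
		.open		= slabinfo_open,
		.read		= seq_read,
		.write		= slabinfo_write,
		.llseek		= seq_lseek,
		.release	= seq_release,
	};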