Diffstat (limited to 'include/linux/slub_def.h'):

 include/linux/slub_def.h | 26 ++++++++++++++++----------
 1 file changed, 16 insertions(+), 10 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index c8668d161dd8..f58d6413d230 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -24,6 +24,7 @@ enum stat_item {
 	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from partial list */
 	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
 	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
+	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
 	FREE_SLAB,		/* Slab freed to the page allocator */
 	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
 	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
@@ -31,8 +32,10 @@ enum stat_item {
 	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
 	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
 	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
+	DEACTIVATE_BYPASS,	/* Implicit deactivation */
 	ORDER_FALLBACK,		/* Number of times fallback was necessary */
 	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
+	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
 	NR_SLUB_STAT_ITEMS };
 
 struct kmem_cache_cpu {
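
The three new counters (ALLOC_NODE_MISMATCH, DEACTIVATE_BYPASS, CMPXCHG_DOUBLE_FAIL) follow the existing per-cpu statistics scheme. As a minimal sketch, modeled on the stat() helper that mm/slub.c already uses (written from memory, not part of this diff):

/* Sketch of the consumer side: with CONFIG_SLUB_STATS each event
 * bumps a per-cpu counter indexed by the stat_item value; without
 * it the call compiles away entirely. */
static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	__this_cpu_inc(s->cpu_slab->stat[si]);
#endif
}

The slow allocation path can then record, e.g., stat(s, ALLOC_NODE_MISMATCH) just before it deactivates a cpu slab that sits on the wrong NUMA node.
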
@@ -113,16 +116,6 @@ struct kmem_cache {
 
 #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
 
-#ifdef ARCH_DMA_MINALIGN
-#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
-#else
-#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
-#endif
-
-#ifndef ARCH_SLAB_MINALIGN
-#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
-#endif
-
 /*
  * Maximum kmalloc object size handled by SLUB. Larger object allocations
  * are passed through to the page allocator. The page allocator "fastpath"
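
The ARCH_KMALLOC_MINALIGN and ARCH_SLAB_MINALIGN fallbacks are dropped from this SLUB-specific header; the definitions appear to have been consolidated into the allocator-independent <linux/slab.h>, so the guarantee itself is unchanged. A hedged illustration of what that guarantee buys (example_dma_buf() is hypothetical):

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/slab.h>

/* kmalloc() memory is aligned to at least ARCH_KMALLOC_MINALIGN,
 * which on platforms that define ARCH_DMA_MINALIGN makes small
 * buffers safe to hand to non-coherent DMA. */
static void *example_dma_buf(size_t len, gfp_t flags)
{
	void *buf = kmalloc(len, flags);

	WARN_ON(buf && !IS_ALIGNED((unsigned long)buf, ARCH_KMALLOC_MINALIGN));
	return buf;
}
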
@@ -228,6 +221,19 @@ kmalloc_order(size_t size, gfp_t flags, unsigned int order)
 	return ret;
 }
 
+/**
+ * Calling this on allocated memory will check that the memory
+ * is expected to be in use, and print warnings if not.
+ */
+#ifdef CONFIG_SLUB_DEBUG
+extern bool verify_mem_not_deleted(const void *x);
+#else
+static inline bool verify_mem_not_deleted(const void *x)
+{
+	return true;
+}
+#endif
+
 #ifdef CONFIG_TRACING
 extern void *
 kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
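
For reference, a usage sketch of the new helper; struct foo and foo_get() are hypothetical. With CONFIG_SLUB_DEBUG the call checks the object's slab state and warns if it looks freed; without it, the inline stub above simply returns true:

#include <linux/bug.h>
#include <linux/slab.h>

struct foo {			/* hypothetical example structure */
	int refcount;
};

static void foo_get(struct foo *f)
{
	/* Complain loudly if f has already been returned to the slab. */
	WARN_ON(!verify_mem_not_deleted(f));
	f->refcount++;
}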