Diffstat (limited to 'include/linux/slub_def.h')
-rw-r--r--  include/linux/slub_def.h | 82
1 file changed, 39 insertions(+), 43 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 9f63538928c0..c8668d161dd8 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -10,9 +10,8 @@
 #include <linux/gfp.h>
 #include <linux/workqueue.h>
 #include <linux/kobject.h>
-#include <linux/kmemleak.h>
 
-#include <trace/events/kmem.h>
+#include <linux/kmemleak.h>
 
 enum stat_item {
        ALLOC_FASTPATH,         /* Allocation from cpu slab */
@@ -33,10 +32,12 @@ enum stat_item {
        DEACTIVATE_TO_TAIL,     /* Cpu slab was moved to the tail of partials */
        DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
        ORDER_FALLBACK,         /* Number of times fallback was necessary */
+       CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
        NR_SLUB_STAT_ITEMS };
 
 struct kmem_cache_cpu {
-       void **freelist;        /* Pointer to first free per cpu object */
+       void **freelist;        /* Pointer to next available object */
+       unsigned long tid;      /* Globally unique transaction id */
        struct page *page;      /* The slab from which we are allocating */
        int node;               /* The node of the page (or -1 for debug) */
 #ifdef CONFIG_SLUB_STATS
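The new tid field and the CMPXCHG_DOUBLE_CPU_FAIL counter above go together: the counter's own comment says it records failures of this_cpu_cmpxchg_double on the per-cpu freelist/tid pair. The reworded freelist comment ("pointer to next available object") describes an in-object freelist, where each free object stores a pointer to the one after it. The following is a minimal userspace sketch of that freelist idea only, not kernel code; freelist_init() and freelist_pop() are made-up names for illustration.

#include <stdio.h>
#include <stdlib.h>

#define OBJ_SIZE 64
#define NR_OBJS  4

static void *freelist;                 /* next available object */

static void freelist_init(char *slab)
{
        for (int i = 0; i < NR_OBJS; i++) {
                void **obj = (void **)(slab + i * OBJ_SIZE);
                /* first word of a free object points to the next free object */
                *obj = (i + 1 < NR_OBJS) ? slab + (i + 1) * OBJ_SIZE : NULL;
        }
        freelist = slab;
}

static void *freelist_pop(void)
{
        void *obj = freelist;
        if (obj)
                freelist = *(void **)obj;   /* advance to the next available object */
        return obj;
}

int main(void)
{
        char *slab = malloc(NR_OBJS * OBJ_SIZE);
        freelist_init(slab);
        for (void *p; (p = freelist_pop()); )
                printf("allocated object at %p\n", p);
        free(slab);
        return 0;
}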
@@ -71,6 +72,7 @@ struct kmem_cache {
        struct kmem_cache_cpu __percpu *cpu_slab;
        /* Used for retriving partial slabs etc */
        unsigned long flags;
+       unsigned long min_partial;
        int size;               /* The size of an object including meta data */
        int objsize;            /* The size of an object without meta data */
        int offset;             /* Free pointer offset. */
@@ -84,10 +86,10 @@ struct kmem_cache {
        void (*ctor)(void *);
        int inuse;              /* Offset to metadata */
        int align;              /* Alignment */
-       unsigned long min_partial;
+       int reserved;           /* Reserved bytes at the end of slabs */
        const char *name;       /* Name (only for display!) */
        struct list_head list;  /* List of slab caches */
-#ifdef CONFIG_SLUB_DEBUG
+#ifdef CONFIG_SYSFS
        struct kobject kobj;    /* For sysfs */
 #endif
 
@@ -96,11 +98,8 @@ struct kmem_cache {
         * Defragmentation by allocating from a remote node.
         */
        int remote_node_defrag_ratio;
-       struct kmem_cache_node *node[MAX_NUMNODES];
-#else
-       /* Avoid an extra cache line for UP */
-       struct kmem_cache_node local_node;
 #endif
+       struct kmem_cache_node *node[MAX_NUMNODES];
 };
 
 /*
@@ -139,19 +138,16 @@ struct kmem_cache {
 
 #ifdef CONFIG_ZONE_DMA
 #define SLUB_DMA __GFP_DMA
-/* Reserve extra caches for potential DMA use */
-#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT)
 #else
 /* Disable DMA functionality */
 #define SLUB_DMA (__force gfp_t)0
-#define KMALLOC_CACHES SLUB_PAGE_SHIFT
 #endif
 
 /*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[KMALLOC_CACHES];
+extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -181,7 +177,8 @@ static __always_inline int kmalloc_index(size_t size)
        if (size <= 4 * 1024) return 12;
 /*
  * The following is only needed to support architectures with a larger page
- * size than 4k.
+ * size than 4k. We need to support 2 * PAGE_SIZE here. So for a 64k page
+ * size we would have to go up to 128k.
  */
        if (size <= 8 * 1024) return 13;
        if (size <= 16 * 1024) return 14;
@@ -192,7 +189,8 @@ static __always_inline int kmalloc_index(size_t size)
        if (size <= 512 * 1024) return 19;
        if (size <= 1024 * 1024) return 20;
        if (size <= 2 * 1024 * 1024) return 21;
-       return -1;
+       BUG();
+       return -1; /* Will never be reached */
 
 /*
  * What we really wanted to do and cannot do because of compiler issues is:
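kmalloc_index() maps a request size to the power-of-two general cache that serves it (index n covers allocations up to 2^n bytes), and with this change an out-of-range constant size hits BUG() instead of silently returning -1. Below is a userspace sketch of that size-to-index mapping, ignoring the special small-size caches (96 and 192 bytes) the real function also handles; size_to_index() is a made-up name.

#include <assert.h>
#include <stdio.h>

static int size_to_index(size_t size)
{
        int index = 0;
        size_t cap = 1;

        assert(size > 0 && size <= 2 * 1024 * 1024);
        while (cap < size) {            /* smallest power of two >= size */
                cap <<= 1;
                index++;
        }
        return index;
}

int main(void)
{
        printf("%d %d %d\n",
               size_to_index(4096),                 /* 12 */
               size_to_index(5000),                 /* 13 */
               size_to_index(2 * 1024 * 1024));     /* 21 */
        return 0;
}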
@@ -216,37 +214,46 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
        if (index == 0)
                return NULL;
 
-       return &kmalloc_caches[index];
+       return kmalloc_caches[index];
 }
 
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
+static __always_inline void *
+kmalloc_order(size_t size, gfp_t flags, unsigned int order)
+{
+       void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
+       kmemleak_alloc(ret, size, 1, flags);
+       return ret;
+}
+
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
+extern void *
+kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
 #else
 static __always_inline void *
-kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
        return kmem_cache_alloc(s, gfpflags);
 }
+
+static __always_inline void *
+kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+{
+       return kmalloc_order(size, flags, order);
+}
 #endif
 
 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
 {
        unsigned int order = get_order(size);
-       void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
-
-       kmemleak_alloc(ret, size, 1, flags);
-       trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags);
-
-       return ret;
+       return kmalloc_order_trace(size, flags, order);
 }
 
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
-       void *ret;
-
        if (__builtin_constant_p(size)) {
                if (size > SLUB_MAX_SIZE)
                        return kmalloc_large(size, flags);
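For constant sizes above SLUB_MAX_SIZE, kmalloc_large() now funnels through kmalloc_order_trace()/kmalloc_order(), which grab whole pages with __get_free_pages(flags | __GFP_COMP, order) and report the allocation to kmemleak, as the hunk above shows. The order argument comes from get_order(size), the smallest n such that 2^n pages cover the request. Below is a userspace sketch of that order computation, assuming 4 KiB pages; order_for() is a made-up name, not a kernel helper.

#include <stdio.h>

#define PAGE_SIZE_BYTES 4096UL

static unsigned int order_for(size_t size)
{
        unsigned int order = 0;
        size_t covered = PAGE_SIZE_BYTES;

        while (covered < size) {        /* double the page count until it fits */
                covered <<= 1;
                order++;
        }
        return order;
}

int main(void)
{
        /* 8 KiB -> order 1 (two pages), 100 KiB -> order 5 (32 pages) */
        printf("order(8KiB)=%u order(100KiB)=%u\n",
               order_for(8 * 1024), order_for(100 * 1024));
        return 0;
}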
@@ -257,11 +264,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
                if (!s)
                        return ZERO_SIZE_PTR;
 
-               ret = kmem_cache_alloc_notrace(s, flags);
-
-               trace_kmalloc(_THIS_IP_, ret, size, s->size, flags);
-
-               return ret;
+               return kmem_cache_alloc_trace(s, flags, size);
        }
        return __kmalloc(size, flags);
@@ -272,14 +275,14 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node);
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
                                           gfp_t gfpflags,
-                                          int node);
+                                          int node, size_t size);
 #else
 static __always_inline void *
-kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+kmem_cache_alloc_node_trace(struct kmem_cache *s,
                              gfp_t gfpflags,
-                             int node)
+                             int node, size_t size)
 {
        return kmem_cache_alloc_node(s, gfpflags, node);
 }
@@ -287,8 +290,6 @@ kmem_cache_alloc_node_notrace(struct kmem_cache *s,
 
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
-       void *ret;
-
        if (__builtin_constant_p(size) &&
                size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
                        struct kmem_cache *s = kmalloc_slab(size);
@@ -296,12 +297,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
                if (!s)
                        return ZERO_SIZE_PTR;
 
-               ret = kmem_cache_alloc_node_notrace(s, flags, node);
-
-               trace_kmalloc_node(_THIS_IP_, ret,
-                                  size, s->size, flags, node);
-
-               return ret;
+               return kmem_cache_alloc_node_trace(s, flags, node, size);
        }
        return __kmalloc_node(size, flags, node);
 }
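Both kmalloc() and kmalloc_node() keep their compile-time dispatch: __builtin_constant_p(size) lets constant-size calls resolve directly to kmem_cache_alloc_trace()/kmem_cache_alloc_node_trace() (or the large-allocation path), while variable sizes fall back to __kmalloc()/__kmalloc_node(). Below is a userspace sketch of the same pattern; alloc_fast() and alloc_slow() are made-up stand-ins for the cache lookup and fallback paths, and the snippet should be built with optimization (e.g. -O2) so the constant-size call is actually folded onto the fast path.

#include <stdio.h>
#include <stdlib.h>

static void *alloc_fast(size_t size)
{
        printf("fast path (constant size %zu)\n", size);
        return malloc(size);
}

static void *alloc_slow(size_t size)
{
        printf("slow path (runtime size %zu)\n", size);
        return malloc(size);
}

static inline void *my_alloc(size_t size)
{
        /* with inlining and optimization, this branch is decided at compile time */
        if (__builtin_constant_p(size))
                return alloc_fast(size);
        return alloc_slow(size);
}

int main(void)
{
        size_t runtime = (size_t)rand() % 128 + 1;

        free(my_alloc(64));        /* constant size: folds to alloc_fast() at -O2 */
        free(my_alloc(runtime));   /* runtime size: goes through alloc_slow() */
        return 0;
}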