Diffstat (limited to 'include/linux/slub_def.h')
-rw-r--r--   include/linux/slub_def.h | 74
1 file changed, 67 insertions, 7 deletions
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 2f5c16b1aacd..a1f90528e70b 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -10,6 +10,7 @@
 #include <linux/gfp.h>
 #include <linux/workqueue.h>
 #include <linux/kobject.h>
+#include <trace/kmemtrace.h>
 
 enum stat_item {
 	ALLOC_FASTPATH,		/* Allocation from cpu slab */
@@ -46,7 +47,6 @@ struct kmem_cache_cpu {
 struct kmem_cache_node {
 	spinlock_t list_lock;	/* Protect partial list and nr_partial */
 	unsigned long nr_partial;
-	unsigned long min_partial;
 	struct list_head partial;
 #ifdef CONFIG_SLUB_DEBUG
 	atomic_long_t nr_slabs;
@@ -89,6 +89,7 @@ struct kmem_cache {
 	void (*ctor)(void *);
 	int inuse;		/* Offset to metadata */
 	int align;		/* Alignment */
+	unsigned long min_partial;
 	const char *name;	/* Name (only for display!) */
 	struct list_head list;	/* List of slab caches */
 #ifdef CONFIG_SLUB_DEBUG
@@ -121,10 +122,23 @@ struct kmem_cache {
 #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
 
 /*
+ * Maximum kmalloc object size handled by SLUB. Larger object allocations
+ * are passed through to the page allocator. The page allocator "fastpath"
+ * is relatively slow so we need this value sufficiently high so that
+ * performance critical objects are allocated through the SLUB fastpath.
+ *
+ * This should be dropped to PAGE_SIZE / 2 once the page allocator
+ * "fastpath" becomes competitive with the slab allocator fastpaths.
+ */
+#define SLUB_MAX_SIZE (2 * PAGE_SIZE)
+
+#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)
+
+/*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1];
+extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
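
For orientation, a minimal sketch (not part of the patch) of how the two new constants interact, assuming 4 KiB pages, i.e. PAGE_SHIFT = 12:

/* Sketch only, assuming PAGE_SHIFT = 12 (4 KiB pages):
 *
 *   PAGE_SIZE       = 1 << 12        = 4096
 *   SLUB_MAX_SIZE   = 2 * PAGE_SIZE  = 8192
 *   SLUB_PAGE_SHIFT = PAGE_SHIFT + 2 = 14
 *
 * kmalloc_caches[SLUB_PAGE_SHIFT] therefore has room for general caches
 * up to 2^13 = 8192 bytes, i.e. exactly SLUB_MAX_SIZE; constant-size
 * kmalloc() requests above that are handed to kmalloc_large() and the
 * page allocator instead.
 */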
@@ -204,15 +218,33 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
+#ifdef CONFIG_KMEMTRACE
+extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
+#else
+static __always_inline void *
+kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+{
+	return kmem_cache_alloc(s, gfpflags);
+}
+#endif
+
 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
 {
-	return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size));
+	unsigned int order = get_order(size);
+	void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
+			     size, PAGE_SIZE << order, flags);
+
+	return ret;
 }
 
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
+	void *ret;
+
 	if (__builtin_constant_p(size)) {
-		if (size > PAGE_SIZE)
+		if (size > SLUB_MAX_SIZE)
 			return kmalloc_large(size, flags);
 
 		if (!(flags & SLUB_DMA)) {
@@ -221,7 +253,13 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 			if (!s)
 				return ZERO_SIZE_PTR;
 
-			return kmem_cache_alloc(s, flags);
+			ret = kmem_cache_alloc_notrace(s, flags);
+
+			kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
+					     _THIS_IP_, ret,
+					     size, s->size, flags);
+
+			return ret;
 		}
 	}
 	return __kmalloc(size, flags);
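
As far as this header shows, the _notrace variant exists so that the inline kmalloc() fast path can emit exactly one kmemtrace event itself, recording the caller's _THIS_IP_, the requested size and the cache's real object size (s->size), rather than having kmem_cache_alloc() log a second, less precise event. A hypothetical caller, purely for illustration:

/* Hypothetical caller, not part of the patch. With compile-time constant
 * sizes the dispatch in the inline kmalloc() above is folded by the
 * compiler, so only the matching branch survives.
 */
static void kmemtrace_kmalloc_example(void)
{
	void *small = kmalloc(128, GFP_KERNEL);		  /* <= SLUB_MAX_SIZE: kmem_cache_alloc_notrace()
							   * plus one kmemtrace_mark_alloc() event       */
	void *large = kmalloc(4 * PAGE_SIZE, GFP_KERNEL); /* >  SLUB_MAX_SIZE: kmalloc_large(), traced with
							   * PAGE_SIZE << order as the allocated size    */

	kfree(small);
	kfree(large);
}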
@@ -231,16 +269,38 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 void *__kmalloc_node(size_t size, gfp_t flags, int node);
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
+#ifdef CONFIG_KMEMTRACE
+extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+					   gfp_t gfpflags,
+					   int node);
+#else
+static __always_inline void *
+kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+			      gfp_t gfpflags,
+			      int node)
+{
+	return kmem_cache_alloc_node(s, gfpflags, node);
+}
+#endif
+
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
+	void *ret;
+
 	if (__builtin_constant_p(size) &&
-		size <= PAGE_SIZE && !(flags & SLUB_DMA)) {
+		size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
 			struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
 			return ZERO_SIZE_PTR;
 
-		return kmem_cache_alloc_node(s, flags, node);
+		ret = kmem_cache_alloc_node_notrace(s, flags, node);
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _THIS_IP_, ret,
+					  size, s->size, flags, node);
+
+		return ret;
 	}
 	return __kmalloc_node(size, flags, node);
 }
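
The NUMA variant follows the same pattern, with kmemtrace_mark_alloc_node() additionally recording the requested node. When CONFIG_KMEMTRACE is not set, the #else wrappers above collapse back to plain kmem_cache_alloc()/kmem_cache_alloc_node(), and the kmemtrace_mark_alloc*() calls are presumably compiled out via <trace/kmemtrace.h>, leaving the fast paths as before. A hypothetical node-aware caller, again only illustrative:

/* Hypothetical caller, not part of the patch: a constant size of 256 bytes
 * is <= SLUB_MAX_SIZE, so this resolves to kmem_cache_alloc_node_notrace()
 * followed by one kmemtrace_mark_alloc_node() event carrying the node id.
 */
static void *kmemtrace_kmalloc_node_example(int node)
{
	return kmalloc_node(256, GFP_KERNEL, node);
}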