Diffstat (limited to 'include/linux/slub_def.h')
 -rw-r--r--  include/linux/slub_def.h  70
 1 file changed, 63 insertions(+), 7 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 2f5c16b1aacd..5046f90c1171 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -10,6 +10,7 @@
 #include <linux/gfp.h>
 #include <linux/workqueue.h>
 #include <linux/kobject.h>
+#include <trace/kmemtrace.h>
 
 enum stat_item {
 	ALLOC_FASTPATH,		/* Allocation from cpu slab */
@@ -46,7 +47,6 @@ struct kmem_cache_cpu {
 struct kmem_cache_node {
 	spinlock_t list_lock;	/* Protect partial list and nr_partial */
 	unsigned long nr_partial;
-	unsigned long min_partial;
 	struct list_head partial;
 #ifdef CONFIG_SLUB_DEBUG
 	atomic_long_t nr_slabs;
@@ -89,6 +89,7 @@ struct kmem_cache {
 	void (*ctor)(void *);
 	int inuse;		/* Offset to metadata */
 	int align;		/* Alignment */
+	unsigned long min_partial;
 	const char *name;	/* Name (only for display!) */
 	struct list_head list;	/* List of slab caches */
 #ifdef CONFIG_SLUB_DEBUG
@@ -121,10 +122,23 @@ struct kmem_cache {
 #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
 
 /*
+ * Maximum kmalloc object size handled by SLUB. Larger object allocations
+ * are passed through to the page allocator. The page allocator "fastpath"
+ * is relatively slow so we need this value sufficiently high so that
+ * performance critical objects are allocated through the SLUB fastpath.
+ *
+ * This should be dropped to PAGE_SIZE / 2 once the page allocator
+ * "fastpath" becomes competitive with the slab allocator fastpaths.
+ */
+#define SLUB_MAX_SIZE (2 * PAGE_SIZE)
+
+#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)
+
+/*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1];
+extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
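As an aside, a quick sanity check on the two new constants (the values below assume the common 4 KiB page size and are not part of the patch):

/* Aside, not part of the patch: worked values for PAGE_SHIFT == 12 (4 KiB pages). */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)	/* 4096 */

#define SLUB_MAX_SIZE	(2 * PAGE_SIZE)		/* 8192: biggest kmalloc served by a SLUB cache */
#define SLUB_PAGE_SHIFT	(PAGE_SHIFT + 2)	/* 14: kmalloc_caches[] has indices 0..13, i.e. up to 2^13 = 8192 bytes */

/* Before the patch the array bound was PAGE_SHIFT + 1 (caches up to 2^12 = PAGE_SIZE),
 * so the cutoff for falling back to the page allocator effectively doubles. */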
@@ -204,15 +218,32 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
+#ifdef CONFIG_KMEMTRACE
+extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
+#else
+static __always_inline void *
+kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+{
+	return kmem_cache_alloc(s, gfpflags);
+}
+#endif
+
 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
 {
-	return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size));
+	unsigned int order = get_order(size);
+	void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
+
+	trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags);
+
+	return ret;
 }
 
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
+	void *ret;
+
 	if (__builtin_constant_p(size)) {
-		if (size > PAGE_SIZE)
+		if (size > SLUB_MAX_SIZE)
 			return kmalloc_large(size, flags);
 
 		if (!(flags & SLUB_DMA)) {
@@ -221,7 +252,11 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 			if (!s)
 				return ZERO_SIZE_PTR;
 
-			return kmem_cache_alloc(s, flags);
+			ret = kmem_cache_alloc_notrace(s, flags);
+
+			trace_kmalloc(_THIS_IP_, ret, size, s->size, flags);
+
+			return ret;
 		}
 	}
 	return __kmalloc(size, flags);
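The split above is the heart of the patch: the inline kmalloc() fastpath calls the _notrace variant and emits trace_kmalloc() itself, so the allocation is not also logged by the traced out-of-line kmem_cache_alloc(), and the event can record the size the caller actually requested (size) next to the object size handed out (s->size). A self-contained userspace sketch of the same idea follows; every name in it is invented for illustration and nothing below is kernel API.

/* Userspace toy, not kernel code: the backend stays untraced, the inline
 * wrapper rounds the request up to its "cache" size and logs one event
 * carrying both the requested and the allocated size. */
#include <stdio.h>
#include <stdlib.h>

#define MYTRACE 1				/* stand-in for CONFIG_KMEMTRACE */

static void *backend_alloc(size_t object_size)	/* stand-in for the _notrace call */
{
	return malloc(object_size);
}

static inline void trace_alloc(void *ret, size_t requested, size_t allocated)
{
#if MYTRACE
	fprintf(stderr, "kmalloc-like: %p requested=%zu allocated=%zu\n",
		ret, requested, allocated);
#endif
}

static inline void *my_malloc(size_t size)
{
	size_t object_size = 8;			/* smallest "cache" */
	void *ret;

	while (object_size < size)		/* pick the power-of-two cache, */
		object_size <<= 1;		/* roughly what kmalloc_slab() does */

	ret = backend_alloc(object_size);
	trace_alloc(ret, size, object_size);	/* the wrapper, not the backend, traces */
	return ret;
}

int main(void)
{
	void *p = my_malloc(100);		/* logs requested=100 allocated=128 */

	free(p);
	return 0;
}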
@@ -231,16 +266,37 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 void *__kmalloc_node(size_t size, gfp_t flags, int node);
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
+#ifdef CONFIG_KMEMTRACE
+extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+					   gfp_t gfpflags,
+					   int node);
+#else
+static __always_inline void *
+kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+			      gfp_t gfpflags,
+			      int node)
+{
+	return kmem_cache_alloc_node(s, gfpflags, node);
+}
+#endif
+
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
+	void *ret;
+
 	if (__builtin_constant_p(size) &&
-		size <= PAGE_SIZE && !(flags & SLUB_DMA)) {
+		size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
 			struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
 			return ZERO_SIZE_PTR;
 
-		return kmem_cache_alloc_node(s, flags, node);
+		ret = kmem_cache_alloc_node_notrace(s, flags, node);
+
+		trace_kmalloc_node(_THIS_IP_, ret,
+				   size, s->size, flags, node);
+
+		return ret;
 	}
 	return __kmalloc_node(size, flags, node);
 }
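For callers nothing changes syntactically; the difference is what the inlined fastpath expands to. A hedged sketch of a hypothetical caller (the struct and function are invented, and the comments assume CONFIG_KMEMTRACE=y with 4 KiB pages):

/* Hypothetical caller, not part of the patch. */
#include <linux/slab.h>

struct foo {
	int a, b;
	char name[48];
};

static int example(void)
{
	/* Constant size <= SLUB_MAX_SIZE: inlines to kmalloc_slab() +
	 * kmem_cache_alloc_notrace(), plus one trace_kmalloc() event with
	 * the requested size (56) and the cache object size (64). */
	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);

	/* Constant size > SLUB_MAX_SIZE: routed to kmalloc_large(), which
	 * now traces PAGE_SIZE << order as the allocated size. */
	void *big = kmalloc(4 * PAGE_SIZE, GFP_KERNEL);

	if (!f || !big) {
		kfree(big);
		kfree(f);
		return -ENOMEM;
	}

	kfree(big);
	kfree(f);
	return 0;
}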