Diffstat (limited to 'include/linux/slab.h')
-rw-r--r--	include/linux/slab.h | 156
1 file changed, 124 insertions, 32 deletions
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 6c5cc0ea8713..386af639dcaa 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -4,6 +4,8 @@
  * (C) SGI 2006, Christoph Lameter
  * Cleaned up and restructured to ease the addition of alternative
  * implementations of SLAB allocators.
+ * (C) Linux Foundation 2008-2013
+ *	Unified interface for all slab allocators
  */
 
 #ifndef _LINUX_SLAB_H
@@ -94,6 +96,7 @@
 #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
 				(unsigned long)ZERO_SIZE_PTR)
 
+#include <linux/kmemleak.h>
 
 struct mem_cgroup;
 /*
@@ -289,6 +292,57 @@ static __always_inline int kmalloc_index(size_t size)
 }
 #endif /* !CONFIG_SLOB */
 
+void *__kmalloc(size_t size, gfp_t flags);
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
+
+#ifdef CONFIG_NUMA
+void *__kmalloc_node(size_t size, gfp_t flags, int node);
+void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
+#else
+static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
+{
+	return __kmalloc(size, flags);
+}
+
+static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
+{
+	return kmem_cache_alloc(s, flags);
+}
+#endif
+
+#ifdef CONFIG_TRACING
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
+
+#ifdef CONFIG_NUMA
+extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
+					 gfp_t gfpflags,
+					 int node, size_t size);
+#else
+static __always_inline void *
+kmem_cache_alloc_node_trace(struct kmem_cache *s,
+			    gfp_t gfpflags,
+			    int node, size_t size)
+{
+	return kmem_cache_alloc_trace(s, gfpflags, size);
+}
+#endif /* CONFIG_NUMA */
+
+#else /* CONFIG_TRACING */
+static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
+		gfp_t flags, size_t size)
+{
+	return kmem_cache_alloc(s, flags);
+}
+
+static __always_inline void *
+kmem_cache_alloc_node_trace(struct kmem_cache *s,
+			    gfp_t gfpflags,
+			    int node, size_t size)
+{
+	return kmem_cache_alloc_node(s, gfpflags, node);
+}
+#endif /* CONFIG_TRACING */
+
 #ifdef CONFIG_SLAB
 #include <linux/slab_def.h>
 #endif
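The hunk above declares the common entry points, __kmalloc() and kmem_cache_alloc(), and supplies inline !CONFIG_NUMA and !CONFIG_TRACING fallbacks, so callers can use the _node and _trace variants unconditionally. A minimal caller-side sketch (struct foo, foo_cache and foo_alloc_demo() are hypothetical, not part of this patch):

#include <linux/errno.h>
#include <linux/numa.h>
#include <linux/slab.h>

struct foo {			/* hypothetical object type */
	int a;
	long b;
};

static struct kmem_cache *foo_cache;

static int foo_alloc_demo(void)
{
	struct foo *f;

	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
				      SLAB_HWCACHE_ALIGN, NULL);
	if (!foo_cache)
		return -ENOMEM;

	/* On !CONFIG_NUMA kernels this inlines to plain kmem_cache_alloc(). */
	f = kmem_cache_alloc_node(foo_cache, GFP_KERNEL, NUMA_NO_NODE);
	if (!f)
		return -ENOMEM;

	kmem_cache_free(foo_cache, f);
	return 0;
}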
@@ -297,9 +351,60 @@ static __always_inline int kmalloc_index(size_t size)
 #include <linux/slub_def.h>
 #endif
 
-#ifdef CONFIG_SLOB
-#include <linux/slob_def.h>
+static __always_inline void *
+kmalloc_order(size_t size, gfp_t flags, unsigned int order)
+{
+	void *ret;
+
+	flags |= (__GFP_COMP | __GFP_KMEMCG);
+	ret = (void *) __get_free_pages(flags, order);
+	kmemleak_alloc(ret, size, 1, flags);
+	return ret;
+}
+
+#ifdef CONFIG_TRACING
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
+#else
+static __always_inline void *
+kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+{
+	return kmalloc_order(size, flags, order);
+}
+#endif
+
+static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
+{
+	unsigned int order = get_order(size);
+	return kmalloc_order_trace(size, flags, order);
+}
+
+/**
+ * kmalloc - allocate memory
+ * @size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate (see kcalloc).
+ *
+ * kmalloc is the normal method of allocating memory
+ * for objects smaller than page size in the kernel.
+ */
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
+{
+	if (__builtin_constant_p(size)) {
+		if (size > KMALLOC_MAX_CACHE_SIZE)
+			return kmalloc_large(size, flags);
+#ifndef CONFIG_SLOB
+		if (!(flags & GFP_DMA)) {
+			int index = kmalloc_index(size);
+
+			if (!index)
+				return ZERO_SIZE_PTR;
+
+			return kmem_cache_alloc_trace(kmalloc_caches[index],
+					flags, size);
+		}
 #endif
+	}
+	return __kmalloc(size, flags);
+}
 
 /*
  * Determine size used for the nth kmalloc cache.
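The effect of the inline kmalloc() above: when size is a compile-time constant, the call folds to kmem_cache_alloc_trace() on a fixed kmalloc cache, or to kmalloc_large() beyond KMALLOC_MAX_CACHE_SIZE; only a runtime size reaches __kmalloc(). A hedged sketch of the three paths (kmalloc_dispatch_demo() and the sizes are illustrative only):

#include <linux/slab.h>

static void kmalloc_dispatch_demo(size_t runtime_len)
{
	void *small, *big, *dyn;

	/* Constant, cache-sized: compiles down to kmem_cache_alloc_trace()
	 * on kmalloc_caches[kmalloc_index(64)]. */
	small = kmalloc(64, GFP_KERNEL);

	/* Constant but larger than KMALLOC_MAX_CACHE_SIZE (two pages under
	 * SLUB): compiles straight to kmalloc_large(). */
	big = kmalloc(64 * 1024, GFP_KERNEL);

	/* Size not known at compile time: falls through to __kmalloc(). */
	dyn = kmalloc(runtime_len, GFP_KERNEL);

	/* kfree() accepts NULL, so failed allocations need no extra checks
	 * in this sketch. */
	kfree(dyn);
	kfree(big);
	kfree(small);
}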
@@ -321,6 +426,23 @@ static __always_inline int kmalloc_size(int n)
 	return 0;
 }
 
+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+#ifndef CONFIG_SLOB
+	if (__builtin_constant_p(size) &&
+		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
+		int i = kmalloc_index(size);
+
+		if (!i)
+			return ZERO_SIZE_PTR;
+
+		return kmem_cache_alloc_node_trace(kmalloc_caches[i],
+						flags, node, size);
+	}
+#endif
+	return __kmalloc_node(size, flags, node);
+}
+
 /*
  * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
  * Intended for arches that get misalignment faults even for 64 bit integer
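kmalloc_node() now mirrors kmalloc(): a constant, cache-sized, non-DMA request resolves at compile time to kmem_cache_alloc_node_trace(); everything else falls through to __kmalloc_node(). A sketch of per-node allocation under these rules (alloc_bufs_per_node() is a hypothetical helper, assuming bufs has MAX_NUMNODES slots; cleanup on failure is elided):

#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/slab.h>

static int alloc_bufs_per_node(void *bufs[])
{
	int node;

	for_each_online_node(node) {
		/* 512 is a compile-time constant, so this resolves to
		 * kmem_cache_alloc_node_trace() on the 512-byte cache. */
		bufs[node] = kmalloc_node(512, GFP_KERNEL, node);
		if (!bufs[node])
			return -ENOMEM;
	}
	return 0;
}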
@@ -451,36 +573,6 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
 	return kmalloc_array(n, size, flags | __GFP_ZERO);
 }
 
-#if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB)
-/**
- * kmalloc_node - allocate memory from a specific node
- * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate (see kmalloc).
- * @node: node to allocate from.
- *
- * kmalloc() for non-local nodes, used to allocate from a specific node
- * if available. Equivalent to kmalloc() in the non-NUMA single-node
- * case.
- */
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	return kmalloc(size, flags);
-}
-
-static inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	return __kmalloc(size, flags);
-}
-
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
-
-static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
-					gfp_t flags, int node)
-{
-	return kmem_cache_alloc(cachep, flags);
-}
-#endif /* !CONFIG_NUMA && !CONFIG_SLOB */
-
 /*
  * kmalloc_track_caller is a special version of kmalloc that records the
  * calling function of the routine calling it for slab leak tracking instead