diff options
author | Christoph Lameter <cl@linux.com> | 2013-09-04 12:35:34 -0400 |
---|---|---|
committer | Pekka Enberg <penberg@kernel.org> | 2013-09-04 13:51:33 -0400 |
commit | f1b6eb6e6be149b40ebb013f5bfe2ac86b6f1c1b (patch) | |
tree | 245897276adc30bc17a23ba1b5364a065e2ecb74 /mm | |
parent | 9de1bc875261411bf0a900e90cfe0c7a31c4917b (diff) |
mm/sl[aou]b: Move kmallocXXX functions to common code
The kmalloc* functions of all slab allocators are similar now so
let's move them into slab.h. This requires some function naming changes
in slob.
As a result of this patch there is a common set of functions for
all allocators. Also means that kmalloc_large() is now available
in general to perform large order allocations that go directly
via the page allocator. kmalloc_large() can be substituted if
kmalloc() throws warnings because of too large allocations.
kmalloc_large() has exactly the same semantics as kmalloc but
can only be used for allocations > PAGE_SIZE.
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/slab_common.c | 10 | ||||
-rw-r--r-- | mm/slob.c | 28 | ||||
-rw-r--r-- | mm/slub.c | 8 |
3 files changed, 34 insertions, 12 deletions
diff --git a/mm/slab_common.c b/mm/slab_common.c index f0410eb61741..a3443278ce3a 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <asm/tlbflush.h> | 19 | #include <asm/tlbflush.h> |
20 | #include <asm/page.h> | 20 | #include <asm/page.h> |
21 | #include <linux/memcontrol.h> | 21 | #include <linux/memcontrol.h> |
22 | #include <trace/events/kmem.h> | ||
22 | 23 | ||
23 | #include "slab.h" | 24 | #include "slab.h" |
24 | 25 | ||
@@ -495,6 +496,15 @@ void __init create_kmalloc_caches(unsigned long flags) | |||
495 | } | 496 | } |
496 | #endif /* !CONFIG_SLOB */ | 497 | #endif /* !CONFIG_SLOB */ |
497 | 498 | ||
499 | #ifdef CONFIG_TRACING | ||
500 | void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) | ||
501 | { | ||
502 | void *ret = kmalloc_order(size, flags, order); | ||
503 | trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags); | ||
504 | return ret; | ||
505 | } | ||
506 | EXPORT_SYMBOL(kmalloc_order_trace); | ||
507 | #endif | ||
498 | 508 | ||
499 | #ifdef CONFIG_SLABINFO | 509 | #ifdef CONFIG_SLABINFO |
500 | 510 | ||
@@ -462,11 +462,11 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller) | |||
462 | return ret; | 462 | return ret; |
463 | } | 463 | } |
464 | 464 | ||
465 | void *__kmalloc_node(size_t size, gfp_t gfp, int node) | 465 | void *__kmalloc(size_t size, gfp_t gfp) |
466 | { | 466 | { |
467 | return __do_kmalloc_node(size, gfp, node, _RET_IP_); | 467 | return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_); |
468 | } | 468 | } |
469 | EXPORT_SYMBOL(__kmalloc_node); | 469 | EXPORT_SYMBOL(__kmalloc); |
470 | 470 | ||
471 | #ifdef CONFIG_TRACING | 471 | #ifdef CONFIG_TRACING |
472 | void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller) | 472 | void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller) |
@@ -534,7 +534,7 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags) | |||
534 | return 0; | 534 | return 0; |
535 | } | 535 | } |
536 | 536 | ||
537 | void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node) | 537 | void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node) |
538 | { | 538 | { |
539 | void *b; | 539 | void *b; |
540 | 540 | ||
@@ -560,7 +560,27 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node) | |||
560 | kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags); | 560 | kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags); |
561 | return b; | 561 | return b; |
562 | } | 562 | } |
563 | EXPORT_SYMBOL(slob_alloc_node); | ||
564 | |||
565 | void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) | ||
566 | { | ||
567 | return slob_alloc_node(cachep, flags, NUMA_NO_NODE); | ||
568 | } | ||
569 | EXPORT_SYMBOL(kmem_cache_alloc); | ||
570 | |||
571 | #ifdef CONFIG_NUMA | ||
572 | void *__kmalloc_node(size_t size, gfp_t gfp, int node) | ||
573 | { | ||
574 | return __do_kmalloc_node(size, gfp, node, _RET_IP_); | ||
575 | } | ||
576 | EXPORT_SYMBOL(__kmalloc_node); | ||
577 | |||
578 | void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node) | ||
579 | { | ||
580 | return slob_alloc_node(cachep, gfp, node); | ||
581 | } | ||
563 | EXPORT_SYMBOL(kmem_cache_alloc_node); | 582 | EXPORT_SYMBOL(kmem_cache_alloc_node); |
583 | #endif | ||
564 | 584 | ||
565 | static void __kmem_cache_free(void *b, int size) | 585 | static void __kmem_cache_free(void *b, int size) |
566 | { | 586 | { |
@@ -2450,14 +2450,6 @@ void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size) | |||
2450 | return ret; | 2450 | return ret; |
2451 | } | 2451 | } |
2452 | EXPORT_SYMBOL(kmem_cache_alloc_trace); | 2452 | EXPORT_SYMBOL(kmem_cache_alloc_trace); |
2453 | |||
2454 | void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) | ||
2455 | { | ||
2456 | void *ret = kmalloc_order(size, flags, order); | ||
2457 | trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags); | ||
2458 | return ret; | ||
2459 | } | ||
2460 | EXPORT_SYMBOL(kmalloc_order_trace); | ||
2461 | #endif | 2453 | #endif |
2462 | 2454 | ||
2463 | #ifdef CONFIG_NUMA | 2455 | #ifdef CONFIG_NUMA |