Diffstat (limited to 'mm/slab.c')
 mm/slab.c | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)
@@ -2123,6 +2123,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
  *
  * @name must be valid until the cache is destroyed. This implies that
  * the module calling this has to destroy the cache before getting unloaded.
+ * Note that kmem_cache_name() is not guaranteed to return the same pointer,
+ * therefore applications must manage it themselves.
  *
  * The flags are
  *
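The lifetime rule spelled out in the comment above is easy to illustrate. The sketch below is not part of the patch; the foo names are hypothetical. It shows a module that passes a string literal as @name, so the name stays valid for the cache's whole life, and that destroys the cache in its exit handler, before the module can be unloaded:

#include <linux/module.h>
#include <linux/slab.h>

struct foo {
	int x;
};

static struct kmem_cache *foo_cache;

static int __init foo_init(void)
{
	/* "foo" is a string literal: it stays valid until the cache dies */
	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
				      SLAB_HWCACHE_ALIGN, NULL);
	return foo_cache ? 0 : -ENOMEM;
}

static void __exit foo_exit(void)
{
	/* destroy the cache before the module (and its strings) go away */
	kmem_cache_destroy(foo_cache);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");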
@@ -2609,7 +2611,7 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
 	if (OFF_SLAB(cachep)) {
 		/* Slab management obj is off-slab. */
 		slabp = kmem_cache_alloc_node(cachep->slabp_cache,
-					      local_flags & ~GFP_THISNODE, nodeid);
+					      local_flags, nodeid);
 		if (!slabp)
 			return NULL;
 	} else {
@@ -2997,7 +2999,7 @@ retry:
 		 * there must be at least one object available for
 		 * allocation.
 		 */
-		BUG_ON(slabp->inuse < 0 || slabp->inuse >= cachep->num);
+		BUG_ON(slabp->inuse >= cachep->num);
 
 		while (slabp->inuse < cachep->num && batchcount--) {
 			STATS_INC_ALLOCED(cachep);
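One detail behind the shortened BUG_ON() above: slabp->inuse is declared as an unsigned int in struct slab, so a less-than-zero test on it can never succeed. A standalone userspace sketch (illustration only, not from mm/slab.c) of why such a check is dead code:

#include <stdio.h>

int main(void)
{
	unsigned int inuse = 0;

	/* An unsigned value is never negative, so this branch is dead code;
	 * gcc points this out with -Wextra/-Wtype-limits
	 * ("comparison of unsigned expression < 0 is always false"). */
	if (inuse < 0)
		printf("never reached\n");

	printf("inuse = %u\n", inuse);
	return 0;
}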
@@ -3621,9 +3623,9 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 EXPORT_SYMBOL(__kmalloc_node);
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
-				  int node, void *caller)
+				  int node, unsigned long caller)
 {
-	return __do_kmalloc_node(size, flags, node, caller);
+	return __do_kmalloc_node(size, flags, node, (void *)caller);
 }
 EXPORT_SYMBOL(__kmalloc_node_track_caller);
 #else
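With the caller argument now an unsigned long, the natural value to pass from a wrapper is the kernel's _RET_IP_ macro from linux/kernel.h, i.e. (unsigned long)__builtin_return_address(0). A hypothetical wrapper (the my_node_alloc name is invented, loosely modeled on how the in-tree kmalloc_node_track_caller() convenience macro is built) could look like:

#include <linux/kernel.h>	/* _RET_IP_ */
#include <linux/slab.h>

/*
 * Hypothetical convenience macro: because _RET_IP_ expands at the call
 * site, the slab debug machinery attributes the allocation to whoever
 * used my_node_alloc(), not to some helper in between.
 */
#define my_node_alloc(size, gfp, node) \
	__kmalloc_node_track_caller(size, gfp, node, _RET_IP_)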
@@ -3665,9 +3667,9 @@ void *__kmalloc(size_t size, gfp_t flags)
 }
 EXPORT_SYMBOL(__kmalloc);
 
-void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
+void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
 {
-	return __do_kmalloc(size, flags, caller);
+	return __do_kmalloc(size, flags, (void *)caller);
 }
 EXPORT_SYMBOL(__kmalloc_track_caller);
 
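The UMA variant changes the same way. As a final illustration, a hypothetical string-duplicating helper (my_strdup, loosely modeled on kstrdup()) shows what the tracking entry point is for: the slab debugging output names the helper's caller as the allocation site rather than the helper itself:

#include <linux/kernel.h>	/* _RET_IP_ */
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical helper: _RET_IP_ here is the return address into the
 * function that called my_strdup(), so the copy is charged to that
 * caller rather than to my_strdup() itself. */
static char *my_strdup(const char *s, gfp_t gfp)
{
	size_t len = strlen(s) + 1;
	char *p = __kmalloc_track_caller(len, gfp, _RET_IP_);

	if (p)
		memcpy(p, s, len);
	return p;
}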