about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
Diffstat (limited to 'mm')
-rw-r--r--  mm/slab.c |  8
-rw-r--r--  mm/slob.c |  6
-rw-r--r--  mm/slub.c | 55
3 files changed, 26 insertions(+), 43 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 52cf0b4634d4..7d92f08b88d7 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2150,8 +2150,6 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
2150 * 2150 *
2151 * @name must be valid until the cache is destroyed. This implies that 2151 * @name must be valid until the cache is destroyed. This implies that
2152 * the module calling this has to destroy the cache before getting unloaded. 2152 * the module calling this has to destroy the cache before getting unloaded.
2153 * Note that kmem_cache_name() is not guaranteed to return the same pointer,
2154 * therefore applications must manage it themselves.
2155 * 2153 *
2156 * The flags are 2154 * The flags are
2157 * 2155 *
@@ -3843,12 +3841,6 @@ unsigned int kmem_cache_size(struct kmem_cache *cachep)
3843} 3841}
3844EXPORT_SYMBOL(kmem_cache_size); 3842EXPORT_SYMBOL(kmem_cache_size);
3845 3843
3846const char *kmem_cache_name(struct kmem_cache *cachep)
3847{
3848 return cachep->name;
3849}
3850EXPORT_SYMBOL_GPL(kmem_cache_name);
3851
3852/* 3844/*
3853 * This initializes kmem_list3 or resizes various caches for all nodes. 3845 * This initializes kmem_list3 or resizes various caches for all nodes.
3854 */ 3846 */
diff --git a/mm/slob.c b/mm/slob.c
index 3588eaaef726..46e0aee33a23 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -666,12 +666,6 @@ unsigned int kmem_cache_size(struct kmem_cache *c)
666} 666}
667EXPORT_SYMBOL(kmem_cache_size); 667EXPORT_SYMBOL(kmem_cache_size);
668 668
669const char *kmem_cache_name(struct kmem_cache *c)
670{
671 return c->name;
672}
673EXPORT_SYMBOL(kmem_cache_name);
674
675int kmem_cache_shrink(struct kmem_cache *d) 669int kmem_cache_shrink(struct kmem_cache *d)
676{ 670{
677 return 0; 671 return 0;
diff --git a/mm/slub.c b/mm/slub.c
index ebba3eb19369..e841d8921c22 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -281,6 +281,30 @@ static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
281 return (p - addr) / s->size; 281 return (p - addr) / s->size;
282} 282}
283 283
284static inline size_t slab_ksize(const struct kmem_cache *s)
285{
286#ifdef CONFIG_SLUB_DEBUG
287 /*
288 * Debugging requires use of the padding between object
289 * and whatever may come after it.
290 */
291 if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
292 return s->objsize;
293
294#endif
295 /*
296 * If we have the need to store the freelist pointer
297 * back there or track user information then we can
298 * only use the space before that information.
299 */
300 if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
301 return s->inuse;
302 /*
303 * Else we can use all the padding etc for the allocation
304 */
305 return s->size;
306}
307
284static inline int order_objects(int order, unsigned long size, int reserved) 308static inline int order_objects(int order, unsigned long size, int reserved)
285{ 309{
286 return ((PAGE_SIZE << order) - reserved) / size; 310 return ((PAGE_SIZE << order) - reserved) / size;
@@ -805,7 +829,7 @@ static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
805static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object) 829static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
806{ 830{
807 flags &= gfp_allowed_mask; 831 flags &= gfp_allowed_mask;
808 kmemcheck_slab_alloc(s, flags, object, s->objsize); 832 kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
809 kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags); 833 kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags);
810} 834}
811 835
@@ -2425,12 +2449,6 @@ unsigned int kmem_cache_size(struct kmem_cache *s)
2425} 2449}
2426EXPORT_SYMBOL(kmem_cache_size); 2450EXPORT_SYMBOL(kmem_cache_size);
2427 2451
2428const char *kmem_cache_name(struct kmem_cache *s)
2429{
2430 return s->name;
2431}
2432EXPORT_SYMBOL(kmem_cache_name);
2433
2434static void list_slab_objects(struct kmem_cache *s, struct page *page, 2452static void list_slab_objects(struct kmem_cache *s, struct page *page,
2435 const char *text) 2453 const char *text)
2436{ 2454{
@@ -2722,7 +2740,6 @@ EXPORT_SYMBOL(__kmalloc_node);
2722size_t ksize(const void *object) 2740size_t ksize(const void *object)
2723{ 2741{
2724 struct page *page; 2742 struct page *page;
2725 struct kmem_cache *s;
2726 2743
2727 if (unlikely(object == ZERO_SIZE_PTR)) 2744 if (unlikely(object == ZERO_SIZE_PTR))
2728 return 0; 2745 return 0;
@@ -2733,28 +2750,8 @@ size_t ksize(const void *object)
2733 WARN_ON(!PageCompound(page)); 2750 WARN_ON(!PageCompound(page));
2734 return PAGE_SIZE << compound_order(page); 2751 return PAGE_SIZE << compound_order(page);
2735 } 2752 }
2736 s = page->slab;
2737
2738#ifdef CONFIG_SLUB_DEBUG
2739 /*
2740 * Debugging requires use of the padding between object
2741 * and whatever may come after it.
2742 */
2743 if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
2744 return s->objsize;
2745 2753
2746#endif 2754 return slab_ksize(page->slab);
2747 /*
2748 * If we have the need to store the freelist pointer
2749 * back there or track user information then we can
2750 * only use the space before that information.
2751 */
2752 if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
2753 return s->inuse;
2754 /*
2755 * Else we can use all the padding etc for the allocation
2756 */
2757 return s->size;
2758} 2755}
2759EXPORT_SYMBOL(ksize); 2756EXPORT_SYMBOL(ksize);
2760 2757