Diffstat (limited to 'mm/slub.c')
 mm/slub.c | 55 ++++++++++++++++++++++++++-----------------------------
 1 file changed, 26 insertions(+), 29 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index ebba3eb19369..e841d8921c22 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -281,6 +281,30 @@ static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
 	return (p - addr) / s->size;
 }
 
+static inline size_t slab_ksize(const struct kmem_cache *s)
+{
+#ifdef CONFIG_SLUB_DEBUG
+	/*
+	 * Debugging requires use of the padding between object
+	 * and whatever may come after it.
+	 */
+	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
+		return s->objsize;
+
+#endif
+	/*
+	 * If we have the need to store the freelist pointer
+	 * back there or track user information then we can
+	 * only use the space before that information.
+	 */
+	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
+		return s->inuse;
+	/*
+	 * Else we can use all the padding etc for the allocation
+	 */
+	return s->size;
+}
+
 static inline int order_objects(int order, unsigned long size, int reserved)
 {
 	return ((PAGE_SIZE << order) - reserved) / size;
@@ -805,7 +829,7 @@ static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
 {
 	flags &= gfp_allowed_mask;
-	kmemcheck_slab_alloc(s, flags, object, s->objsize);
+	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
 	kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags);
 }
 
@@ -2425,12 +2449,6 @@ unsigned int kmem_cache_size(struct kmem_cache *s)
 }
 EXPORT_SYMBOL(kmem_cache_size);
 
-const char *kmem_cache_name(struct kmem_cache *s)
-{
-	return s->name;
-}
-EXPORT_SYMBOL(kmem_cache_name);
-
 static void list_slab_objects(struct kmem_cache *s, struct page *page,
 						const char *text)
 {
@@ -2722,7 +2740,6 @@ EXPORT_SYMBOL(__kmalloc_node);
 size_t ksize(const void *object)
 {
 	struct page *page;
-	struct kmem_cache *s;
 
 	if (unlikely(object == ZERO_SIZE_PTR))
 		return 0;
@@ -2733,28 +2750,8 @@ size_t ksize(const void *object)
 		WARN_ON(!PageCompound(page));
 		return PAGE_SIZE << compound_order(page);
 	}
-	s = page->slab;
-
-#ifdef CONFIG_SLUB_DEBUG
-	/*
-	 * Debugging requires use of the padding between object
-	 * and whatever may come after it.
-	 */
-	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
-		return s->objsize;
 
-#endif
-	/*
-	 * If we have the need to store the freelist pointer
-	 * back there or track user information then we can
-	 * only use the space before that information.
-	 */
-	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
-		return s->inuse;
-	/*
-	 * Else we can use all the padding etc for the allocation
-	 */
-	return s->size;
+	return slab_ksize(page->slab);
 }
 EXPORT_SYMBOL(ksize);
 
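For readers skimming the patch: the new slab_ksize() helper centralizes the answer to "how many bytes of this slot may the caller actually use?", logic that previously lived only in the body of ksize(). Below is a minimal, standalone C model of that decision chain; the struct, flag values, and example field contents are simplified stand-ins for illustration, not the kernel's definitions (in the kernel, the red-zone/poison check is additionally gated by CONFIG_SLUB_DEBUG).

#include <stdio.h>

/* Illustrative stand-ins for the kernel's slab flags; values are made up. */
#define SLAB_RED_ZONE        0x1UL
#define SLAB_POISON          0x2UL
#define SLAB_DESTROY_BY_RCU  0x4UL
#define SLAB_STORE_USER      0x8UL

/* Minimal model of the kmem_cache fields slab_ksize() consults. */
struct kmem_cache_model {
	unsigned long flags;
	size_t objsize; /* requested object size */
	size_t inuse;   /* offset of metadata (freelist ptr / user tracking) */
	size_t size;    /* full slot size, padding included */
};

/* Mirrors the decision chain of the slab_ksize() helper added above. */
static size_t slab_ksize_model(const struct kmem_cache_model *s)
{
	/* Red-zoning/poisoning claims the padding: only objsize is usable. */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->objsize;
	/* Metadata stored past the object limits us to the space before it. */
	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/* Otherwise the whole slot, padding included, is usable. */
	return s->size;
}

int main(void)
{
	struct kmem_cache_model plain = { 0,                   52, 56, 64 };
	struct kmem_cache_model rcu   = { SLAB_DESTROY_BY_RCU, 52, 56, 64 };

	printf("plain cache: %zu usable bytes\n", slab_ksize_model(&plain)); /* 64 */
	printf("rcu cache:   %zu usable bytes\n", slab_ksize_model(&rcu));   /* 56 */
	return 0;
}

With that in hand, the patch's one functional change is easy to state: slab_post_alloc_hook() now reports this usable size to kmemcheck via slab_ksize(s) rather than the raw s->objsize, so kmemcheck tracks exactly the bytes a caller may legitimately touch.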