Diffstat (limited to 'mm/slub.c')
 mm/slub.c | 48 ++++++++++++++++++++++++------------------------
 1 file changed, 24 insertions(+), 24 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 217b5b5338a2..ea6f0390996f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -281,6 +281,30 @@ static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
 	return (p - addr) / s->size;
 }
 
+static inline size_t slab_ksize(const struct kmem_cache *s)
+{
+#ifdef CONFIG_SLUB_DEBUG
+	/*
+	 * Debugging requires use of the padding between object
+	 * and whatever may come after it.
+	 */
+	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
+		return s->objsize;
+
+#endif
+	/*
+	 * If we have the need to store the freelist pointer
+	 * back there or track user information then we can
+	 * only use the space before that information.
+	 */
+	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
+		return s->inuse;
+	/*
+	 * Else we can use all the padding etc for the allocation
+	 */
+	return s->size;
+}
+
 static inline struct kmem_cache_order_objects oo_make(int order,
 						unsigned long size)
 {
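
For context: slab_ksize() reports how many bytes of an allocated object the caller may actually use, and it is what ksize() reports for slab-backed allocations in SLUB. A minimal sketch of such a caller, assuming the ZERO_SIZE_PTR, virt_to_head_page() and PageSlab() helpers and a page->slab back-pointer to the owning cache (the field name is an assumption; the tree being patched may differ):

size_t ksize(const void *object)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return 0;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page))) {
		/* Large kmalloc()s bypass the slab and use whole compound pages. */
		WARN_ON(!PageCompound(page));
		return PAGE_SIZE << compound_order(page);
	}

	/* page->slab (assumed name) points back at the object's kmem_cache. */
	return slab_ksize(page->slab);
}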
@@ -797,30 +821,6 @@ static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 	return should_failslab(s->objsize, flags, s->flags);
 }
 
-static inline size_t slab_ksize(const struct kmem_cache *s)
-{
-#ifdef CONFIG_SLUB_DEBUG
-	/*
-	 * Debugging requires use of the padding between object
-	 * and whatever may come after it.
-	 */
-	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
-		return s->objsize;
-
-#endif
-	/*
-	 * If we have the need to store the freelist pointer
-	 * back there or track user information then we can
-	 * only use the space before that information.
-	 */
-	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
-		return s->inuse;
-	/*
-	 * Else we can use all the padding etc for the allocation
-	 */
-	return s->size;
-}
-
 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
 {
 	flags &= gfp_allowed_mask;
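
The function body is unchanged by this patch; it is only moved earlier in the file, presumably so the slab_pre_alloc_hook()/slab_post_alloc_hook() region it previously sat below can call it without a forward declaration. When reading its three return paths, keep in mind the ordering s->objsize <= s->inuse <= s->size: objsize is the caller-requested object size, inuse marks where SLUB's per-object metadata (free pointer, user tracking) begins, and size is the full per-object stride including alignment padding. A worked illustration, with made-up numbers for a hypothetical cache:

/*
 * Hypothetical cache: objsize = 52, inuse = 56, size = 64.
 *
 * SLAB_RED_ZONE or SLAB_POISON set:
 *	slab_ksize() == 52 (objsize) - the padding belongs to the
 *	debug code, so only the requested size is usable.
 * SLAB_DESTROY_BY_RCU or SLAB_STORE_USER set (no debug flags):
 *	slab_ksize() == 56 (inuse) - usable space stops where the
 *	freelist pointer / tracking data begins.
 * None of the above:
 *	slab_ksize() == 64 (size) - the whole stride, padding
 *	included, is usable.
 */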