author:    Christoph Lameter <cl@linux.com>     2012-06-13 11:24:57 -0400
committer: Pekka Enberg <penberg@kernel.org>    2012-06-14 02:20:16 -0400
commit:    3b0efdfa1e719303536c04d9abca43abeb40f80a
tree:      6a429eebb3febe5cc2101615ec7c7ea4d10fd97b  /mm/slub.c
parent:    350260889b251821e770573dfd65cd851b4ef781
mm, sl[aou]b: Extract common fields from struct kmem_cache
Define a struct that describes the common fields used in all slab allocators. A slab allocator either uses the common definition directly (like SLOB) or is required to provide members of kmem_cache matching that definition. After that it will be possible to share code that only operates on those fields of kmem_cache.

The patch basically takes the SLOB definition of kmem_cache and uses its field names for the other allocators.

It also standardizes the names used for the basic object lengths in the allocators:

object_size	Struct size specified at kmem_cache_create(). Basically the payload expected to be used by the subsystem.

size		The size of the memory allocated for each object. This size is larger than object_size and includes padding, alignment and extra metadata for each object (e.g. for debugging and RCU).

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c | 80
1 file changed, 40 insertions(+), 40 deletions(-)
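For orientation, the struct below sketches the common fields this series standardizes on (paraphrased for illustration, not quoted verbatim from the patch): SLOB uses such a definition directly, while SLAB and SLUB keep their own struct kmem_cache but must provide members with these names.

	/* Sketch of the shared kmem_cache fields; comments are illustrative. */
	struct kmem_cache {
		unsigned int object_size; /* payload size passed to kmem_cache_create() */
		unsigned int size;        /* object_size plus padding, alignment and metadata */
		unsigned int align;       /* alignment as calculated */
		unsigned long flags;      /* active slab flags */
		const char *name;         /* cache name, shown in sysfs/slabinfo */
		int refcount;             /* use counter */
		void (*ctor)(void *);     /* constructor called on each new object */
		struct list_head list;    /* entry in the global list of slab caches */
	};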
diff --git a/mm/slub.c b/mm/slub.c
index 2de3c996f327..797271f5afb8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -311,7 +311,7 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
         * and whatever may come after it.
         */
        if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
-               return s->objsize;
+               return s->object_size;

 #endif
        /*
@@ -609,11 +609,11 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
609 if (p > addr + 16) 609 if (p > addr + 16)
610 print_section("Bytes b4 ", p - 16, 16); 610 print_section("Bytes b4 ", p - 16, 16);
611 611
612 print_section("Object ", p, min_t(unsigned long, s->objsize, 612 print_section("Object ", p, min_t(unsigned long, s->object_size,
613 PAGE_SIZE)); 613 PAGE_SIZE));
614 if (s->flags & SLAB_RED_ZONE) 614 if (s->flags & SLAB_RED_ZONE)
615 print_section("Redzone ", p + s->objsize, 615 print_section("Redzone ", p + s->object_size,
616 s->inuse - s->objsize); 616 s->inuse - s->object_size);
617 617
618 if (s->offset) 618 if (s->offset)
619 off = s->offset + sizeof(void *); 619 off = s->offset + sizeof(void *);
@@ -655,12 +655,12 @@ static void init_object(struct kmem_cache *s, void *object, u8 val)
        u8 *p = object;

        if (s->flags & __OBJECT_POISON) {
-               memset(p, POISON_FREE, s->objsize - 1);
-               p[s->objsize - 1] = POISON_END;
+               memset(p, POISON_FREE, s->object_size - 1);
+               p[s->object_size - 1] = POISON_END;
        }

        if (s->flags & SLAB_RED_ZONE)
-               memset(p + s->objsize, val, s->inuse - s->objsize);
+               memset(p + s->object_size, val, s->inuse - s->object_size);
 }

 static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
@@ -705,10 +705,10 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
  * Poisoning uses 0x6b (POISON_FREE) and the last byte is
  * 0xa5 (POISON_END)
  *
- * object + s->objsize
+ * object + s->object_size
  *      Padding to reach word boundary. This is also used for Redzoning.
  *      Padding is extended by another word if Redzoning is enabled and
- *      objsize == inuse.
+ *      object_size == inuse.
  *
  * We fill with 0xbb (RED_INACTIVE) for inactive objects and with
  * 0xcc (RED_ACTIVE) for objects in use.
@@ -727,7 +727,7 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
  * object + s->size
  *      Nothing is used beyond s->size.
  *
- * If slabcaches are merged then the objsize and inuse boundaries are mostly
+ * If slabcaches are merged then the object_size and inuse boundaries are mostly
  * ignored. And therefore no slab options that rely on these boundaries
  * may be used with merged slabcaches.
  */
@@ -787,25 +787,25 @@ static int check_object(struct kmem_cache *s, struct page *page,
                                        void *object, u8 val)
 {
        u8 *p = object;
-       u8 *endobject = object + s->objsize;
+       u8 *endobject = object + s->object_size;

        if (s->flags & SLAB_RED_ZONE) {
                if (!check_bytes_and_report(s, page, object, "Redzone",
-                       endobject, val, s->inuse - s->objsize))
+                       endobject, val, s->inuse - s->object_size))
                        return 0;
        } else {
-               if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
+               if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
                        check_bytes_and_report(s, page, p, "Alignment padding",
-                               endobject, POISON_INUSE, s->inuse - s->objsize);
+                               endobject, POISON_INUSE, s->inuse - s->object_size);
                }
        }

        if (s->flags & SLAB_POISON) {
                if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
                        (!check_bytes_and_report(s, page, p, "Poison", p,
-                       POISON_FREE, s->objsize - 1) ||
+                       POISON_FREE, s->object_size - 1) ||
                        !check_bytes_and_report(s, page, p, "Poison",
-                               p + s->objsize - 1, POISON_END, 1)))
+                               p + s->object_size - 1, POISON_END, 1)))
                        return 0;
                /*
                 * check_pad_bytes cleans up on its own.
@@ -926,7 +926,7 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
                        page->freelist);

                if (!alloc)
-                       print_section("Object ", (void *)object, s->objsize);
+                       print_section("Object ", (void *)object, s->object_size);

                dump_stack();
        }
@@ -942,14 +942,14 @@ static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
        lockdep_trace_alloc(flags);
        might_sleep_if(flags & __GFP_WAIT);

-       return should_failslab(s->objsize, flags, s->flags);
+       return should_failslab(s->object_size, flags, s->flags);
 }

 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
 {
        flags &= gfp_allowed_mask;
        kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
-       kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags);
+       kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags);
 }

 static inline void slab_free_hook(struct kmem_cache *s, void *x)
@@ -966,13 +966,13 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
                unsigned long flags;

                local_irq_save(flags);
-               kmemcheck_slab_free(s, x, s->objsize);
-               debug_check_no_locks_freed(x, s->objsize);
+               kmemcheck_slab_free(s, x, s->object_size);
+               debug_check_no_locks_freed(x, s->object_size);
                local_irq_restore(flags);
        }
 #endif
        if (!(s->flags & SLAB_DEBUG_OBJECTS))
-               debug_check_no_obj_freed(x, s->objsize);
+               debug_check_no_obj_freed(x, s->object_size);
 }

 /*
@@ -1207,7 +1207,7 @@ out:

 __setup("slub_debug", setup_slub_debug);

-static unsigned long kmem_cache_flags(unsigned long objsize,
+static unsigned long kmem_cache_flags(unsigned long object_size,
        unsigned long flags, const char *name,
        void (*ctor)(void *))
 {
@@ -1237,7 +1237,7 @@ static inline int check_object(struct kmem_cache *s, struct page *page,
1237static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n, 1237static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
1238 struct page *page) {} 1238 struct page *page) {}
1239static inline void remove_full(struct kmem_cache *s, struct page *page) {} 1239static inline void remove_full(struct kmem_cache *s, struct page *page) {}
1240static inline unsigned long kmem_cache_flags(unsigned long objsize, 1240static inline unsigned long kmem_cache_flags(unsigned long object_size,
1241 unsigned long flags, const char *name, 1241 unsigned long flags, const char *name,
1242 void (*ctor)(void *)) 1242 void (*ctor)(void *))
1243{ 1243{
@@ -2098,10 +2098,10 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
                "SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
                nid, gfpflags);
        printk(KERN_WARNING " cache: %s, object size: %d, buffer size: %d, "
-               "default order: %d, min order: %d\n", s->name, s->objsize,
+               "default order: %d, min order: %d\n", s->name, s->object_size,
                s->size, oo_order(s->oo), oo_order(s->min));

-       if (oo_order(s->min) > get_order(s->objsize))
+       if (oo_order(s->min) > get_order(s->object_size))
                printk(KERN_WARNING " %s debugging increased min order, use "
                        "slub_debug=O to disable.\n", s->name);

@@ -2374,7 +2374,7 @@ redo:
        }

        if (unlikely(gfpflags & __GFP_ZERO) && object)
-               memset(object, 0, s->objsize);
+               memset(object, 0, s->object_size);

        slab_post_alloc_hook(s, gfpflags, object);

@@ -2385,7 +2385,7 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
        void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);

-       trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags);
+       trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags);

        return ret;
 }
@@ -2415,7 +2415,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
        void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);

        trace_kmem_cache_alloc_node(_RET_IP_, ret,
-                                   s->objsize, s->size, gfpflags, node);
+                                   s->object_size, s->size, gfpflags, node);

        return ret;
 }
@@ -2910,7 +2910,7 @@ static void set_min_partial(struct kmem_cache *s, unsigned long min)
 static int calculate_sizes(struct kmem_cache *s, int forced_order)
 {
        unsigned long flags = s->flags;
-       unsigned long size = s->objsize;
+       unsigned long size = s->object_size;
        unsigned long align = s->align;
        int order;

@@ -2939,7 +2939,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
         * end of the object and the free pointer. If not then add an
         * additional word to have some bytes to store Redzone information.
         */
-       if ((flags & SLAB_RED_ZONE) && size == s->objsize)
+       if ((flags & SLAB_RED_ZONE) && size == s->object_size)
                size += sizeof(void *);
 #endif

@@ -2987,7 +2987,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
         * user specified and the dynamic determination of cache line size
         * on bootup.
         */
-       align = calculate_alignment(flags, align, s->objsize);
+       align = calculate_alignment(flags, align, s->object_size);
        s->align = align;

        /*
@@ -3035,7 +3035,7 @@ static int kmem_cache_open(struct kmem_cache *s,
        memset(s, 0, kmem_size);
        s->name = name;
        s->ctor = ctor;
-       s->objsize = size;
+       s->object_size = size;
        s->align = align;
        s->flags = kmem_cache_flags(size, flags, name, ctor);
        s->reserved = 0;
@@ -3050,7 +3050,7 @@ static int kmem_cache_open(struct kmem_cache *s,
                 * Disable debugging flags that store metadata if the min slab
                 * order increased.
                 */
-               if (get_order(s->size) > get_order(s->objsize)) {
+               if (get_order(s->size) > get_order(s->object_size)) {
                        s->flags &= ~DEBUG_METADATA_FLAGS;
                        s->offset = 0;
                        if (!calculate_sizes(s, -1))
@@ -3124,7 +3124,7 @@ error:
  */
 unsigned int kmem_cache_size(struct kmem_cache *s)
 {
-       return s->objsize;
+       return s->object_size;
 }
 EXPORT_SYMBOL(kmem_cache_size);

@@ -3853,11 +3853,11 @@ void __init kmem_cache_init(void)

                if (s && s->size) {
                        char *name = kasprintf(GFP_NOWAIT,
-                                "dma-kmalloc-%d", s->objsize);
+                                "dma-kmalloc-%d", s->object_size);

                        BUG_ON(!name);
                        kmalloc_dma_caches[i] = create_kmalloc_cache(name,
-                               s->objsize, SLAB_CACHE_DMA);
+                               s->object_size, SLAB_CACHE_DMA);
                }
        }
 #endif
@@ -3951,7 +3951,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
                 * Adjust the object sizes so that we clear
                 * the complete object on kzalloc.
                 */
-               s->objsize = max(s->objsize, (int)size);
+               s->object_size = max(s->object_size, (int)size);
                s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));

                if (sysfs_slab_alias(s, name)) {
@@ -4634,7 +4634,7 @@ SLAB_ATTR_RO(align);

 static ssize_t object_size_show(struct kmem_cache *s, char *buf)
 {
-       return sprintf(buf, "%d\n", s->objsize);
+       return sprintf(buf, "%d\n", s->object_size);
 }
 SLAB_ATTR_RO(object_size);

@@ -5438,7 +5438,7 @@ __initcall(slab_sysfs_init);
 static void print_slabinfo_header(struct seq_file *m)
 {
        seq_puts(m, "slabinfo - version: 2.1\n");
-       seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
+       seq_puts(m, "# name <active_objs> <num_objs> <object_size> "
                "<objperslab> <pagesperslab>");
        seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
        seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
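As a side note, a minimal, hypothetical caller sketch (not part of this patch; the module and struct names below are made up) illustrating what the kmem_cache_size() hunk above preserves: the function keeps reporting the payload size, now stored in s->object_size, rather than the padded s->size.

	#include <linux/module.h>
	#include <linux/slab.h>

	struct demo { int a; char pad[13]; };

	static int __init demo_init(void)
	{
		struct kmem_cache *cachep;

		cachep = kmem_cache_create("demo", sizeof(struct demo),
					   0, SLAB_POISON, NULL);
		if (!cachep)
			return -ENOMEM;

		/* Prints sizeof(struct demo); poison/redzone metadata is
		 * accounted in s->size, not in the value reported here. */
		pr_info("demo: usable object size = %u\n",
			kmem_cache_size(cachep));

		kmem_cache_destroy(cachep);
		return 0;
	}

	static void __exit demo_exit(void) { }

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");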