-rw-r--r--  mm/slab.c         20
-rw-r--r--  mm/slab.h          3
-rw-r--r--  mm/slab_common.c  32
-rw-r--r--  mm/slob.c         10
-rw-r--r--  mm/slub.c         38
5 files changed, 34 insertions, 69 deletions
diff --git a/mm/slab.c b/mm/slab.c
index e1790e56fd86..2c3a2e0394db 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2337,22 +2337,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 		size &= ~(BYTES_PER_WORD - 1);
 	}
 
-	/* calculate the final buffer alignment: */
-
-	/* 1) arch recommendation: can be overridden for debug */
-	if (flags & SLAB_HWCACHE_ALIGN) {
-		/*
-		 * Default alignment: as specified by the arch code. Except if
-		 * an object is really small, then squeeze multiple objects into
-		 * one cacheline.
-		 */
-		ralign = cache_line_size();
-		while (size <= ralign / 2)
-			ralign /= 2;
-	} else {
-		ralign = BYTES_PER_WORD;
-	}
-
 	/*
 	 * Redzoning and user store require word alignment or possibly larger.
 	 * Note this will be overridden by architecture or caller mandated
@@ -2369,10 +2353,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 		size &= ~(REDZONE_ALIGN - 1);
 	}
 
-	/* 2) arch mandated alignment */
-	if (ralign < ARCH_SLAB_MINALIGN) {
-		ralign = ARCH_SLAB_MINALIGN;
-	}
 	/* 3) caller mandated alignment */
 	if (ralign < cachep->align) {
 		ralign = cachep->align;
diff --git a/mm/slab.h b/mm/slab.h
index 492eafa0b538..1cb9c9ee0e6f 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -32,6 +32,9 @@ extern struct list_head slab_caches;
 /* The slab cache that manages slab cache information */
 extern struct kmem_cache *kmem_cache;
 
+unsigned long calculate_alignment(unsigned long flags,
+			unsigned long align, unsigned long size);
+
 /* Functions provided by the slab allocators */
 extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);
 
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 497b45c25bae..a8e76d79ee65 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -73,6 +73,34 @@ static inline int kmem_cache_sanity_check(const char *name, size_t size)
 #endif
 
 /*
+ * Figure out what the alignment of the objects will be given a set of
+ * flags, a user specified alignment and the size of the objects.
+ */
+unsigned long calculate_alignment(unsigned long flags,
+		unsigned long align, unsigned long size)
+{
+	/*
+	 * If the user wants hardware cache aligned objects then follow that
+	 * suggestion if the object is sufficiently large.
+	 *
+	 * The hardware cache alignment cannot override the specified
+	 * alignment though. If that is greater then use it.
+	 */
+	if (flags & SLAB_HWCACHE_ALIGN) {
+		unsigned long ralign = cache_line_size();
+		while (size <= ralign / 2)
+			ralign /= 2;
+		align = max(align, ralign);
+	}
+
+	if (align < ARCH_SLAB_MINALIGN)
+		align = ARCH_SLAB_MINALIGN;
+
+	return ALIGN(align, sizeof(void *));
+}
+
+
+/*
  * kmem_cache_create - Create a cache.
  * @name: A string which is used in /proc/slabinfo to identify this cache.
  * @size: The size of objects to be created in this cache.
@@ -124,7 +152,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
 	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
 	if (s) {
 		s->object_size = s->size = size;
-		s->align = align;
+		s->align = calculate_alignment(flags, align, size);
 		s->ctor = ctor;
 		s->name = kstrdup(name, GFP_KERNEL);
 		if (!s->name) {
@@ -211,7 +239,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
 
 	s->name = name;
 	s->size = s->object_size = size;
-	s->align = ARCH_KMALLOC_MINALIGN;
+	s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);
 	err = __kmem_cache_create(s, flags);
 
 	if (err)
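
For illustration only (not part of the patch): a minimal userspace sketch of the
alignment rule that the new common calculate_alignment() implements. The cache
line size (64 bytes), ARCH_SLAB_MINALIGN (8) and the SLAB_HWCACHE_ALIGN bit are
assumptions stubbed in for the example, not taken from any particular
architecture or kernel header.

    /*
     * Standalone sketch, not kernel code: the constants below are
     * assumptions for the example only.
     */
    #include <stdio.h>

    #define SLAB_HWCACHE_ALIGN	0x2000UL	/* flag bit; exact value irrelevant here */
    #define ARCH_SLAB_MINALIGN	8UL		/* assumed minimum alignment */
    #define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

    static unsigned long cache_line_size(void)
    {
    	return 64;	/* assumed L1 cache line size */
    }

    static unsigned long calculate_alignment(unsigned long flags,
    		unsigned long align, unsigned long size)
    {
    	if (flags & SLAB_HWCACHE_ALIGN) {
    		unsigned long ralign = cache_line_size();
    		/* small objects: pack several per cache line */
    		while (size <= ralign / 2)
    			ralign /= 2;
    		if (ralign > align)
    			align = ralign;
    	}
    	if (align < ARCH_SLAB_MINALIGN)
    		align = ARCH_SLAB_MINALIGN;
    	return ALIGN(align, sizeof(void *));
    }

    int main(void)
    {
    	/* 24-byte objects: the 64-byte line is halved once, two objects per line */
    	printf("%lu\n", calculate_alignment(SLAB_HWCACHE_ALIGN, 0, 24));	/* 32 */
    	/* 200-byte objects: full cache line alignment */
    	printf("%lu\n", calculate_alignment(SLAB_HWCACHE_ALIGN, 0, 200));	/* 64 */
    	/* no SLAB_HWCACHE_ALIGN: only the minimum alignment applies */
    	printf("%lu\n", calculate_alignment(0, 0, 24));				/* 8 */
    	return 0;
    }

The same function is now the single place where a caller-supplied alignment, the
hardware cache line heuristic and the architecture minimum are reconciled for
all three allocators.
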
diff --git a/mm/slob.c b/mm/slob.c
index 87e16c4d9143..795bab7d391d 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -123,7 +123,6 @@ static inline void clear_slob_page_free(struct page *sp)
 
 #define SLOB_UNIT sizeof(slob_t)
 #define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
-#define SLOB_ALIGN L1_CACHE_BYTES
 
 /*
  * struct slob_rcu is inserted at the tail of allocated slob blocks, which
@@ -527,20 +526,11 @@ EXPORT_SYMBOL(ksize);
 
 int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
 {
-	size_t align = c->size;
-
 	if (flags & SLAB_DESTROY_BY_RCU) {
 		/* leave room for rcu footer at the end of object */
 		c->size += sizeof(struct slob_rcu);
 	}
 	c->flags = flags;
-	/* ignore alignment unless it's forced */
-	c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
-	if (c->align < ARCH_SLAB_MINALIGN)
-		c->align = ARCH_SLAB_MINALIGN;
-	if (c->align < align)
-		c->align = align;
-
 	return 0;
 }
 
diff --git a/mm/slub.c b/mm/slub.c
index c82453ac812a..9640edd2cc78 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2760,32 +2760,6 @@ static inline int calculate_order(int size, int reserved)
 	return -ENOSYS;
 }
 
-/*
- * Figure out what the alignment of the objects will be.
- */
-static unsigned long calculate_alignment(unsigned long flags,
-		unsigned long align, unsigned long size)
-{
-	/*
-	 * If the user wants hardware cache aligned objects then follow that
-	 * suggestion if the object is sufficiently large.
-	 *
-	 * The hardware cache alignment cannot override the specified
-	 * alignment though. If that is greater then use it.
-	 */
-	if (flags & SLAB_HWCACHE_ALIGN) {
-		unsigned long ralign = cache_line_size();
-		while (size <= ralign / 2)
-			ralign /= 2;
-		align = max(align, ralign);
-	}
-
-	if (align < ARCH_SLAB_MINALIGN)
-		align = ARCH_SLAB_MINALIGN;
-
-	return ALIGN(align, sizeof(void *));
-}
-
 static void
 init_kmem_cache_node(struct kmem_cache_node *n)
 {
@@ -2919,7 +2893,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 {
 	unsigned long flags = s->flags;
 	unsigned long size = s->object_size;
-	unsigned long align = s->align;
 	int order;
 
 	/*
@@ -2991,19 +2964,11 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 #endif
 
 	/*
-	 * Determine the alignment based on various parameters that the
-	 * user specified and the dynamic determination of cache line size
-	 * on bootup.
-	 */
-	align = calculate_alignment(flags, align, s->object_size);
-	s->align = align;
-
-	/*
 	 * SLUB stores one object immediately after another beginning from
 	 * offset 0. In order to align the objects we have to simply size
 	 * each object to conform to the alignment.
 	 */
-	size = ALIGN(size, align);
+	size = ALIGN(size, s->align);
 	s->size = size;
 	if (forced_order >= 0)
 		order = forced_order;
@@ -3032,7 +2997,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	s->max = s->oo;
 
 	return !!oo_objects(s->oo);
-
 }
 
 static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
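
A note on the slub.c side, again for illustration only: because SLUB stores
objects back to back starting at offset 0, the alignment computed by
calculate_alignment() is honored purely by rounding the stored object size up
with ALIGN(). A tiny standalone sketch of that rounding (example values only;
ALIGN is written out here rather than taken from kernel headers):

    #include <stdio.h>

    #define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
    	unsigned long object_size = 24;	/* arbitrary example object */
    	unsigned long align = 32;	/* e.g. what calculate_alignment() returned above */
    	unsigned long size = ALIGN(object_size, align);

    	/* objects are then placed at offsets 0, size, 2*size, ... so each
    	 * one starts on a 32-byte boundary */
    	printf("object_size=%lu align=%lu -> size=%lu\n",
    	       object_size, align, size);	/* prints size=32 */
    	return 0;
    }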