path: root/mm/slub.c
author	Christoph Lameter <cl@linux.com>	2012-11-28 11:23:16 -0500
committer	Pekka Enberg <penberg@kernel.org>	2012-12-11 05:14:28 -0500
commit	4590685546a374fb0f60682ce0e3a6fd48911d46
tree	1287ce1e1633067f8bf2cf9f93f1d6fe8a1f8908 /mm/slub.c
parent	2f9baa9fcf8d0a204ca129a671d6086cc100faab
mm/sl[aou]b: Common alignment code
Extract the code to do object alignment from the allocators. Do the
alignment calculations in slab_common so that the __kmem_cache_create
functions of the allocators do not have to deal with alignment.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	38
1 file changed, 1 insertion(+), 37 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index c82453ac812a..9640edd2cc78 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2760,32 +2760,6 @@ static inline int calculate_order(int size, int reserved)
 	return -ENOSYS;
 }
 
-/*
- * Figure out what the alignment of the objects will be.
- */
-static unsigned long calculate_alignment(unsigned long flags,
-		unsigned long align, unsigned long size)
-{
-	/*
-	 * If the user wants hardware cache aligned objects then follow that
-	 * suggestion if the object is sufficiently large.
-	 *
-	 * The hardware cache alignment cannot override the specified
-	 * alignment though. If that is greater then use it.
-	 */
-	if (flags & SLAB_HWCACHE_ALIGN) {
-		unsigned long ralign = cache_line_size();
-		while (size <= ralign / 2)
-			ralign /= 2;
-		align = max(align, ralign);
-	}
-
-	if (align < ARCH_SLAB_MINALIGN)
-		align = ARCH_SLAB_MINALIGN;
-
-	return ALIGN(align, sizeof(void *));
-}
-
 static void
 init_kmem_cache_node(struct kmem_cache_node *n)
 {
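
Per the commit message, the helper deleted above moves into slab_common so
all three allocators share one alignment policy. A minimal sketch of the
common helper, assuming the body is carried over unchanged from the
deletion above; its exact placement and linkage in mm/slab_common.c are
assumptions, not shown by this diff:

/*
 * Sketch: the alignment helper as shared code in mm/slab_common.c.
 * Body taken verbatim from the slub.c deletion above.
 */
unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size)
{
	/*
	 * For SLAB_HWCACHE_ALIGN, start from the cache line size and
	 * halve it while the object still fits in half a line, so small
	 * objects do not waste a whole line; never drop below the
	 * caller-specified alignment.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned long ralign = cache_line_size();

		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	if (align < ARCH_SLAB_MINALIGN)
		align = ARCH_SLAB_MINALIGN;

	/* The result must also be a multiple of the pointer size. */
	return ALIGN(align, sizeof(void *));
}
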
@@ -2919,7 +2893,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 {
 	unsigned long flags = s->flags;
 	unsigned long size = s->object_size;
-	unsigned long align = s->align;
 	int order;
 
 	/*
@@ -2991,19 +2964,11 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 #endif
 
 	/*
-	 * Determine the alignment based on various parameters that the
-	 * user specified and the dynamic determination of cache line size
-	 * on bootup.
-	 */
-	align = calculate_alignment(flags, align, s->object_size);
-	s->align = align;
-
-	/*
 	 * SLUB stores one object immediately after another beginning from
 	 * offset 0. In order to align the objects we have to simply size
 	 * each object to conform to the alignment.
 	 */
-	size = ALIGN(size, align);
+	size = ALIGN(size, s->align);
 	s->size = size;
 	if (forced_order >= 0)
 		order = forced_order;
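
The comment in the hunk above relies on ALIGN() to pad each object, so a
worked example may help; the numbers here are illustrative, not taken from
this diff:

/*
 * ALIGN(x, a) rounds x up to the next multiple of a (a power of two),
 * in essence: (((x) + (a) - 1) & ~((a) - 1)).
 *
 * Example: a 100-byte object with s->align == 64 is sized to
 * ALIGN(100, 64) == 128, so objects packed back to back from offset 0
 * each start on a 64-byte boundary.
 */
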
@@ -3032,7 +2997,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	s->max = s->oo;
 
 	return !!oo_objects(s->oo);
-
 }
 
 static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
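
For context, a hedged usage sketch of how a cache would request hardware
cache alignment and thereby exercise the now-common alignment path; the
struct and cache name are made up for illustration:

struct example {		/* hypothetical payload */
	unsigned long a, b;
};

/*
 * s->align is derived from SLAB_HWCACHE_ALIGN by the common code, and
 * calculate_sizes() then pads s->size with ALIGN(size, s->align).
 */
struct kmem_cache *example_cache =
	kmem_cache_create("example_cache", sizeof(struct example),
			  0, SLAB_HWCACHE_ALIGN, NULL);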