author	Christoph Lameter <cl@linux.com>	2011-06-14 17:16:36 -0400
committer	Pekka Enberg <penberg@kernel.org>	2011-06-16 12:40:20 -0400
commit	3192b920bf7d0c528ab54e7d3689f44055316a37 (patch)
tree	f907471f606a17cef0c1af4bc2187cdd4ee26c64
parent	bd50cfa89153a67429935a15e577a5eb5f10dd1b (diff)
slab, slub, slob: Unify alignment definition
Every slab allocator has its own alignment definition in
include/linux/sl?b_def.h. Extract those and define a common set in
include/linux/slab.h.

SLOB: As noted, we sometimes need double word alignment on 32 bit. This
gives all structures allocated by SLOB an unsigned long long alignment,
like the other allocators.

SLAB: If ARCH_SLAB_MINALIGN was not set, SLAB would define
ARCH_SLAB_MINALIGN as zero, meaning no alignment at all. Give it the
default unsigned long long alignment instead.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
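For illustration only (not part of the patch): a minimal userspace sketch of why the SLOB side of this change matters. On common 32-bit configurations __alignof__(unsigned long) is word sized, while __alignof__(unsigned long long) is typically larger, so switching SLOB's minimum alignment to unsigned long long provides the double word alignment mentioned above.

/*
 * Illustration only, not kernel code: compare the old SLOB minimum
 * alignment (__alignof__(unsigned long)) with the unified one
 * (__alignof__(unsigned long long)). On a typical 32-bit build the
 * second value is larger, which is the double word alignment the
 * commit message refers to.
 */
#include <stdio.h>

int main(void)
{
	printf("alignof(unsigned long)      = %zu\n",
	       __alignof__(unsigned long));
	printf("alignof(unsigned long long) = %zu\n",
	       __alignof__(unsigned long long));
	return 0;
}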
-rw-r--r--  include/linux/slab.h      10
-rw-r--r--  include/linux/slab_def.h  26
-rw-r--r--  include/linux/slob_def.h  10
-rw-r--r--  include/linux/slub_def.h  10
4 files changed, 10 insertions, 46 deletions
diff --git a/include/linux/slab.h b/include/linux/slab.h
index ad4dd1c8d30a..646a639a4aae 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -133,6 +133,16 @@ unsigned int kmem_cache_size(struct kmem_cache *);
 #define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
 #define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_HIGH - PAGE_SHIFT)
 
+#ifdef ARCH_DMA_MINALIGN
+#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
+#else
+#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
+#endif
+
+#ifndef ARCH_SLAB_MINALIGN
+#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
+#endif
+
 /*
  * Common kmalloc functions provided by all allocators
  */
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 83203ae9390b..d7f63112f63c 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -18,32 +18,6 @@
 #include <trace/events/kmem.h>
 
 /*
- * Enforce a minimum alignment for the kmalloc caches.
- * Usually, the kmalloc caches are cache_line_size() aligned, except when
- * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
- * Some archs want to perform DMA into kmalloc caches and need a guaranteed
- * alignment larger than the alignment of a 64-bit integer.
- * ARCH_KMALLOC_MINALIGN allows that.
- * Note that increasing this value may disable some debug features.
- */
-#ifdef ARCH_DMA_MINALIGN
-#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
-#else
-#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
-#endif
-
-#ifndef ARCH_SLAB_MINALIGN
-/*
- * Enforce a minimum alignment for all caches.
- * Intended for archs that get misalignment faults even for BYTES_PER_WORD
- * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
- * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
- * some debug features.
- */
-#define ARCH_SLAB_MINALIGN 0
-#endif
-
-/*
  * struct kmem_cache
  *
  * manages a cache.
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
index 4382db09df4f..0ec00b39d006 100644
--- a/include/linux/slob_def.h
+++ b/include/linux/slob_def.h
@@ -1,16 +1,6 @@
 #ifndef __LINUX_SLOB_DEF_H
 #define __LINUX_SLOB_DEF_H
 
-#ifdef ARCH_DMA_MINALIGN
-#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
-#else
-#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long)
-#endif
-
-#ifndef ARCH_SLAB_MINALIGN
-#define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
-#endif
-
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index c8668d161dd8..fd4fdc72bc8c 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -113,16 +113,6 @@ struct kmem_cache {
 
 #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
 
-#ifdef ARCH_DMA_MINALIGN
-#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
-#else
-#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
-#endif
-
-#ifndef ARCH_SLAB_MINALIGN
-#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
-#endif
-
 /*
  * Maximum kmalloc object size handled by SLUB. Larger object allocations
  * are passed through to the page allocator. The page allocator "fastpath"
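For context (an illustrative sketch, not part of this patch): the common definitions added to include/linux/slab.h keep the existing ARCH_DMA_MINALIGN hook, so an architecture that needs DMA-safe kmalloc() buffers can still raise the minimum alignment, typically to its cache line size, while everyone else falls back to __alignof__(unsigned long long). The L1_CACHE_SHIFT value below is a placeholder, not taken from any particular architecture.

/*
 * Illustrative arch header (placeholder values): if the architecture
 * defines ARCH_DMA_MINALIGN, every kmalloc() object is aligned to at
 * least one cache line, so a buffer used for DMA does not share a
 * cache line with a neighbouring object.
 */
#define L1_CACHE_SHIFT		6	/* hypothetical: 64-byte cache lines */
#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES

/* The unified block in include/linux/slab.h (added above) then resolves to: */
#ifdef ARCH_DMA_MINALIGN
#define ARCH_KMALLOC_MINALIGN	ARCH_DMA_MINALIGN	/* 64 in this sketch */
#else
#define ARCH_KMALLOC_MINALIGN	__alignof__(unsigned long long)
#endif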