Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/slab.h      | 10 ++++++++++
-rw-r--r--  include/linux/slab_def.h  | 26 --------------------------
-rw-r--r--  include/linux/slob_def.h  | 10 ----------
-rw-r--r--  include/linux/slub_def.h  | 10 ----------
4 files changed, 10 insertions(+), 46 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index ad4dd1c8d30a..646a639a4aae 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -133,6 +133,16 @@ unsigned int kmem_cache_size(struct kmem_cache *);
 #define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
 #define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_HIGH - PAGE_SHIFT)
 
+#ifdef ARCH_DMA_MINALIGN
+#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
+#else
+#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
+#endif
+
+#ifndef ARCH_SLAB_MINALIGN
+#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
+#endif
+
 /*
  * Common kmalloc functions provided by all allocators
  */
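
Note: with this hunk, all three allocators share one fallback chain for
the minimum alignment. A minimal sketch of how an architecture opts in,
assuming a hypothetical arch/foo header (the file name and the value 64
are illustrative, not part of this patch):

	/* arch/foo/include/asm/cache.h -- hypothetical example */
	#define ARCH_DMA_MINALIGN	64	/* cache line size, for non-coherent DMA */

Any file that includes <linux/slab.h> then sees ARCH_KMALLOC_MINALIGN as
64; without ARCH_DMA_MINALIGN it falls back to
__alignof__(unsigned long long).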
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 83203ae9390b..d7f63112f63c 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -18,32 +18,6 @@
 #include <trace/events/kmem.h>
 
 /*
- * Enforce a minimum alignment for the kmalloc caches.
- * Usually, the kmalloc caches are cache_line_size() aligned, except when
- * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
- * Some archs want to perform DMA into kmalloc caches and need a guaranteed
- * alignment larger than the alignment of a 64-bit integer.
- * ARCH_KMALLOC_MINALIGN allows that.
- * Note that increasing this value may disable some debug features.
- */
-#ifdef ARCH_DMA_MINALIGN
-#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
-#else
-#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
-#endif
-
-#ifndef ARCH_SLAB_MINALIGN
-/*
- * Enforce a minimum alignment for all caches.
- * Intended for archs that get misalignment faults even for BYTES_PER_WORD
- * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
- * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
- * some debug features.
- */
-#define ARCH_SLAB_MINALIGN 0
-#endif
-
-/*
  * struct kmem_cache
  *
  * manages a cache.
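
The comment block removed above documented when a cache needs alignment
beyond the word size and that raising it can disable SLAB debug
features. As a reminder of how these minimums feed into cache creation,
a hedged sketch (the cache name and object type are made up):

	#include <linux/init.h>
	#include <linux/errno.h>
	#include <linux/types.h>
	#include <linux/slab.h>

	struct foo_obj {
		u64 counter;
		char payload[40];
	};

	static struct kmem_cache *foo_cache;

	static int __init foo_cache_init(void)
	{
		/*
		 * An align argument of 0 lets the allocator pick, but the
		 * result is never below ARCH_SLAB_MINALIGN; kmalloc caches
		 * additionally honour ARCH_KMALLOC_MINALIGN.
		 */
		foo_cache = kmem_cache_create("foo_cache",
					      sizeof(struct foo_obj), 0,
					      SLAB_HWCACHE_ALIGN, NULL);
		return foo_cache ? 0 : -ENOMEM;
	}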
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
index 4382db09df4f..0ec00b39d006 100644
--- a/include/linux/slob_def.h
+++ b/include/linux/slob_def.h
@@ -1,16 +1,6 @@
 #ifndef __LINUX_SLOB_DEF_H
 #define __LINUX_SLOB_DEF_H
 
-#ifdef ARCH_DMA_MINALIGN
-#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
-#else
-#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long)
-#endif
-
-#ifndef ARCH_SLAB_MINALIGN
-#define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
-#endif
-
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
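
One behavioural note: SLOB's old fallbacks used
__alignof__(unsigned long), while the consolidated definitions in
<linux/slab.h> use __alignof__(unsigned long long), so SLOB's minimum
alignment can grow on 32-bit targets where the two differ. A standalone
userspace sketch to illustrate (compiler-dependent; the values shown
assume a typical 32-bit ABI):

	#include <stdio.h>

	int main(void)
	{
		/* On most 32-bit ABIs this prints 4 then 8; on 64-bit
		 * targets both are usually 8 and nothing changes. */
		printf("alignof(unsigned long)      = %zu\n",
		       __alignof__(unsigned long));
		printf("alignof(unsigned long long) = %zu\n",
		       __alignof__(unsigned long long));
		return 0;
	}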
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index c8668d161dd8..fd4fdc72bc8c 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -113,16 +113,6 @@ struct kmem_cache {
 
 #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
 
-#ifdef ARCH_DMA_MINALIGN
-#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
-#else
-#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
-#endif
-
-#ifndef ARCH_SLAB_MINALIGN
-#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
-#endif
-
 /*
  * Maximum kmalloc object size handled by SLUB. Larger object allocations
  * are passed through to the page allocator. The page allocator "fastpath"
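
For context on why one guaranteed minimum matters: drivers hand
kmalloc() buffers to the DMA API, which is only safe on non-coherent
architectures if the buffer never shares a cache line with unrelated
data. A hedged driver-side sketch (the device, buffer size and function
name are made up):

	#include <linux/slab.h>
	#include <linux/dma-mapping.h>

	#define FOO_BUF_SIZE	512	/* illustrative */

	static void *foo_alloc_dma_buf(struct device *dev, dma_addr_t *handle)
	{
		void *buf = kmalloc(FOO_BUF_SIZE, GFP_KERNEL);

		if (!buf)
			return NULL;

		/*
		 * Relies on kmalloc() returning memory aligned to at least
		 * ARCH_KMALLOC_MINALIGN (== ARCH_DMA_MINALIGN when the
		 * architecture defines it), which this patch now guarantees
		 * for all three allocators.
		 */
		*handle = dma_map_single(dev, buf, FOO_BUF_SIZE, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *handle)) {
			kfree(buf);
			return NULL;
		}
		return buf;
	}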