Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/crypto.h    |  6
-rw-r--r--  include/linux/slab_def.h  | 24
-rw-r--r--  include/linux/slob_def.h  |  8
-rw-r--r--  include/linux/slub_def.h  |  8
4 files changed, 40 insertions, 6 deletions
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 24d2e30f1b46..a6a7a1c83f54 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -99,13 +99,7 @@
  * as arm where pointers are 32-bit aligned but there are data types such as
  * u64 which require 64-bit alignment.
  */
-#if defined(ARCH_KMALLOC_MINALIGN)
 #define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN
-#elif defined(ARCH_SLAB_MINALIGN)
-#define CRYPTO_MINALIGN ARCH_SLAB_MINALIGN
-#else
-#define CRYPTO_MINALIGN __alignof__(unsigned long long)
-#endif
 
 #define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))
 
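With ARCH_KMALLOC_MINALIGN now guaranteed by the slab headers changed below, the old #if/#elif fallback chain in crypto.h becomes dead code and CRYPTO_MINALIGN can alias the kmalloc minimum directly. A minimal sketch of the kind of consumer this protects, assuming a hypothetical driver structure that is not part of this patch: on 32-bit arm a pointer is only 4-byte aligned, so the u64 member below is only safe to access directly because the type carries CRYPTO_MINALIGN_ATTR and kmalloc() returns at least ARCH_KMALLOC_MINALIGN-aligned memory.

#include <linux/crypto.h>
#include <linux/types.h>

/*
 * Hypothetical example, not part of this patch: a per-request scratch
 * area whose u64 member must not sit at a merely pointer-aligned
 * address on 32-bit arm.  The type-level attribute asks the compiler
 * for CRYPTO_MINALIGN alignment, which after this change is exactly
 * ARCH_KMALLOC_MINALIGN.
 */
struct example_hash_scratch {
	u64 message_bits;
	u8 partial_block[64];
} CRYPTO_MINALIGN_ATTR;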
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index ca6b2b317991..1812dac8c496 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -16,6 +16,30 @@
 #include <linux/compiler.h>
 #include <linux/kmemtrace.h>
 
+#ifndef ARCH_KMALLOC_MINALIGN
+/*
+ * Enforce a minimum alignment for the kmalloc caches.
+ * Usually, the kmalloc caches are cache_line_size() aligned, except when
+ * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
+ * Some archs want to perform DMA into kmalloc caches and need a guaranteed
+ * alignment larger than the alignment of a 64-bit integer.
+ * ARCH_KMALLOC_MINALIGN allows that.
+ * Note that increasing this value may disable some debug features.
+ */
+#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
+#endif
+
+#ifndef ARCH_SLAB_MINALIGN
+/*
+ * Enforce a minimum alignment for all caches.
+ * Intended for archs that get misalignment faults even for BYTES_PER_WORD
+ * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
+ * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
+ * some debug features.
+ */
+#define ARCH_SLAB_MINALIGN 0
+#endif
+
 /*
  * struct kmem_cache
  *
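Because both definitions are wrapped in #ifndef, an architecture that needs DMA-safe kmalloc alignment can still override them from its own headers before <linux/slab_def.h> is pulled in; absent such an override, the fallbacks keep the old behaviour of __alignof__(unsigned long long) for kmalloc and 0 (no extra constraint) for ARCH_SLAB_MINALIGN. A hedged sketch of such an override, with an illustrative cache-line size and file placement that are assumptions, not taken from this patch:

/*
 * Illustrative arch override, e.g. somewhere under arch/<arch>/include/asm/,
 * visible before <linux/slab.h> includes the allocator-specific header.
 * Aligning every kmalloc() object to a full cache line keeps non-coherent
 * DMA buffers from sharing a line with unrelated data.
 */
#define L1_CACHE_SHIFT		5
#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)

#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES

As the new comment block above notes, raising this value may disable some slab debug features.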
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
index 0ec00b39d006..62667f72c2ef 100644
--- a/include/linux/slob_def.h
+++ b/include/linux/slob_def.h
@@ -1,6 +1,14 @@
 #ifndef __LINUX_SLOB_DEF_H
 #define __LINUX_SLOB_DEF_H
 
+#ifndef ARCH_KMALLOC_MINALIGN
+#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long)
+#endif
+
+#ifndef ARCH_SLAB_MINALIGN
+#define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
+#endif
+
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
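The SLOB fallbacks use __alignof__(unsigned long), which on 32-bit builds can be smaller than the 64-bit alignment that the SLAB and SLUB defaults provide. Code that lays a u64-first structure over kmalloc() memory therefore cannot assume more than ARCH_KMALLOC_MINALIGN; a sketch of a compile-time guard such code might carry, where the structure and function are hypothetical and BUILD_BUG_ON is the usual kernel helper:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_wire_header {
	u64 sequence;	/* wants natural 64-bit alignment on some archs */
	u32 length;
};

static inline void example_check_alignment(void)
{
	/*
	 * Fails the build if the configured allocator (e.g. SLOB on a
	 * 32-bit target) cannot guarantee the alignment this layout
	 * relies on.
	 */
	BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN <
		     __alignof__(struct example_wire_header));
}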
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 0249d4175bac..55695c8d2f8a 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -116,6 +116,14 @@ struct kmem_cache {
 
 #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
 
+#ifndef ARCH_KMALLOC_MINALIGN
+#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
+#endif
+
+#ifndef ARCH_SLAB_MINALIGN
+#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
+#endif
+
 /*
  * Maximum kmalloc object size handled by SLUB. Larger object allocations
  * are passed through to the page allocator. The page allocator "fastpath"
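After this move, every allocator header defines both macros, so anything that includes <linux/slab.h> sees them regardless of whether SLAB, SLOB or SLUB is configured, and the simplified CRYPTO_MINALIGN above resolves the same way in all three cases. A small runtime sanity check one might use while testing such a change, hypothetical and not part of the patch:

#include <linux/kernel.h>
#include <linux/slab.h>

static void example_check_kmalloc_minalign(void)
{
	void *p = kmalloc(64, GFP_KERNEL);

	if (!p)
		return;

	/* Every kmalloc() object must start on an ARCH_KMALLOC_MINALIGN
	 * boundary, whichever allocator is built in. */
	WARN_ON(!IS_ALIGNED((unsigned long)p, ARCH_KMALLOC_MINALIGN));
	kfree(p);
}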