Diffstat (limited to 'include/linux/slab.h')
-rw-r--r--  include/linux/slab.h | 20 ++++++++++++++++++++
1 file changed, 20 insertions(+), 0 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index ad4dd1c8d30a..573c809c33d9 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -134,6 +134,26 @@ unsigned int kmem_cache_size(struct kmem_cache *);
 #define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_HIGH - PAGE_SHIFT)
 
 /*
+ * Some archs want to perform DMA into kmalloc caches and need a guaranteed
+ * alignment larger than the alignment of a 64-bit integer.
+ * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
+ */
+#ifdef ARCH_DMA_MINALIGN
+#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
+#else
+#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
+#endif
+
+/*
+ * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
+ * Intended for arches that get misalignment faults even for 64 bit integer
+ * aligned buffers.
+ */
+#ifndef ARCH_SLAB_MINALIGN
+#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
+#endif
+
+/*
 * Common kmalloc functions provided by all allocators
 */
 void * __must_check __krealloc(const void *, size_t, gfp_t);
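
For context, the hunk above makes the kmalloc minimum alignment follow ARCH_DMA_MINALIGN whenever an architecture's headers define it, and fall back to 8-byte (unsigned long long) alignment otherwise. Below is a minimal user-space sketch of that fallback chain, not kernel code: the value 64 stands in for a hypothetical architecture's cache-line-sized ARCH_DMA_MINALIGN and is an assumption, not taken from any particular arch.

/* sketch.c - illustrates the ARCH_KMALLOC_MINALIGN fallback from the patch above */
#include <stdio.h>

/*
 * An arch that performs DMA into kmalloc buffers would provide this in its
 * own headers; 64 is a hypothetical cache-line size used only for this demo.
 */
#define ARCH_DMA_MINALIGN 64

/* The selection logic added to include/linux/slab.h by this patch: */
#ifdef ARCH_DMA_MINALIGN
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

int main(void)
{
	/*
	 * With ARCH_DMA_MINALIGN defined, kmalloc alignment tracks it (64 here);
	 * without it, both macros resolve to the alignment of unsigned long long.
	 */
	printf("ARCH_KMALLOC_MINALIGN = %d\n", (int)ARCH_KMALLOC_MINALIGN);
	printf("ARCH_SLAB_MINALIGN    = %d\n", (int)ARCH_SLAB_MINALIGN);
	return 0;
}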