author:    Christoph Lameter <cl@linux.com>  2013-02-05 11:36:47 -0500
committer: Pekka Enberg <penberg@kernel.org>  2013-02-06 13:32:13 -0500
commit:    c601fd6956e92b0eb268d4af754073c76155b99d
tree:      98703cec4f2d969ea79520067babd17bedfa2222
parent:    ca34956b804b7554fc4e88826773380d9d5122a8
slab: Handle ARCH_DMA_MINALIGN correctly
James Hogan hit boot problems in next-20130204 on Meta:

  META213-Thread0 DSP [LogF] kobject (4fc03980): tried to init an initialized object, something is seriously wrong.
  META213-Thread0 DSP [LogF]
  META213-Thread0 DSP [LogF] Call trace:
  META213-Thread0 DSP [LogF] [<4000888c>] _show_stack+0x68/0x7c
  META213-Thread0 DSP [LogF] [<400088b4>] _dump_stack+0x14/0x28
  META213-Thread0 DSP [LogF] [<40103794>] _kobject_init+0x58/0x9c
  META213-Thread0 DSP [LogF] [<40103810>] _kobject_create+0x38/0x64
  META213-Thread0 DSP [LogF] [<40103eac>] _kobject_create_and_add+0x14/0x8c
  META213-Thread0 DSP [LogF] [<40190ac4>] _mnt_init+0xd8/0x220
  META213-Thread0 DSP [LogF] [<40190508>] _vfs_caches_init+0xb0/0x160
  META213-Thread0 DSP [LogF] [<401851f4>] _start_kernel+0x274/0x340
  META213-Thread0 DSP [LogF] [<40188424>] _metag_start_kernel+0x58/0x6c
  META213-Thread0 DSP [LogF] [<40000044>] __start+0x44/0x48
  META213-Thread0 DSP [LogF]
  META213-Thread0 DSP [LogF] devtmpfs: initialized
  META213-Thread0 DSP [LogF] L2 Cache: Not present
  META213-Thread0 DSP [LogF] BUG: failure at fs/sysfs/dir.c:736/sysfs_read_ns_type()!
  META213-Thread0 DSP [LogF] Kernel panic - not syncing: BUG!
  META213-Thread0 DSP [Thread Exit] Thread has exited - return code = 4294967295

And bisected the problem to commit 95a05b4 ("slab: Common constants for
kmalloc boundaries").

As it turns out, a fixed KMALLOC_SHIFT_LOW does not work for arches with
higher alignment requirements.

Determine KMALLOC_SHIFT_LOW from ARCH_DMA_MINALIGN instead.

Reported-and-tested-by: James Hogan <james.hogan@imgtec.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
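As a worked example of the fix, assume a hypothetical arch that defines
ARCH_DMA_MINALIGN as 64 for DMA into kmalloc memory. The patched header
derives all three constants from that one value:

  ARCH_KMALLOC_MINALIGN = 64
  KMALLOC_MIN_SIZE      = 64
  KMALLOC_SHIFT_LOW     = ilog2(64) = 6

so 1 << KMALLOC_SHIFT_LOW equals KMALLOC_MIN_SIZE and the index of the
smallest kmalloc cache matches its size. Before the patch, KMALLOC_SHIFT_LOW
stayed fixed at 5 (SLAB) or 3 (SLUB) while KMALLOC_MIN_SIZE became 64, so
the two constants could disagree on such arches.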
Diffstat (limited to 'include/linux/slab.h')
-rw-r--r--  include/linux/slab.h | 32 ++++++++++++++++++++++--------------
1 file changed, 18 insertions(+), 14 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index f2327a898a85..0c621752caa6 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -133,6 +133,19 @@ void kfree(const void *);
 void kzfree(const void *);
 size_t ksize(const void *);
 
+/*
+ * Some archs want to perform DMA into kmalloc caches and need a guaranteed
+ * alignment larger than the alignment of a 64-bit integer.
+ * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
+ */
+#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
+#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
+#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
+#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
+#else
+#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
+#endif
+
 #ifdef CONFIG_SLOB
 /*
  * Common fields provided in kmem_cache by all slab allocators
@@ -179,7 +192,9 @@ struct kmem_cache {
 #define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
 				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
 #define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
+#ifndef KMALLOC_SHIFT_LOW
 #define KMALLOC_SHIFT_LOW	5
+#endif
 #else
 /*
  * SLUB allocates up to order 2 pages directly and otherwise
@@ -187,8 +202,10 @@ struct kmem_cache {
  */
 #define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
 #define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
+#ifndef KMALLOC_SHIFT_LOW
 #define KMALLOC_SHIFT_LOW	3
 #endif
+#endif
 
 /* Maximum allocatable size */
 #define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
@@ -200,9 +217,7 @@ struct kmem_cache {
 /*
  * Kmalloc subsystem.
  */
-#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
-#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
-#else
+#ifndef KMALLOC_MIN_SIZE
 #define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
 #endif
 
@@ -290,17 +305,6 @@ static __always_inline int kmalloc_size(int n)
 #endif /* !CONFIG_SLOB */
 
 /*
- * Some archs want to perform DMA into kmalloc caches and need a guaranteed
- * alignment larger than the alignment of a 64-bit integer.
- * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
- */
-#ifdef ARCH_DMA_MINALIGN
-#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
-#else
-#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
-#endif
-
-/*
  * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
  * Intended for arches that get misalignment faults even for 64 bit integer
  * aligned buffers.
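
To see the combined effect of the moved and guarded definitions, here is a
minimal standalone sketch (plain userspace C, not kernel code) of how the
patched header resolves the constants; the arch value of 64 and the use of
__builtin_ctz as a stand-in for the kernel's ilog2() are assumptions made
for illustration:

  #include <stdio.h>

  /* Assumption for illustration: an arch that performs DMA into kmalloc
   * memory and therefore requires 64-byte alignment. */
  #define ARCH_DMA_MINALIGN 64

  /* Stand-in for the kernel's ilog2(); exact for power-of-two constants. */
  #define ilog2(n) __builtin_ctz(n)

  /* Mirrors the patched include/linux/slab.h: one arch value now drives
   * the minimum alignment, the minimum size, and the low shift. */
  #if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
  #define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
  #define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
  #define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
  #else
  #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
  #endif

  /* The allocator defaults (5 for SLAB, 3 for SLUB) are now guarded, so
   * they apply only when the arch did not already set the shift. */
  #ifndef KMALLOC_SHIFT_LOW
  #define KMALLOC_SHIFT_LOW 3
  #endif

  #ifndef KMALLOC_MIN_SIZE
  #define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
  #endif

  int main(void)
  {
          /* Prints 64 / 6 / 64: the smallest cache is 64 bytes and its
           * shift agrees with it, which a fixed shift did not guarantee. */
          printf("KMALLOC_MIN_SIZE      = %d\n", KMALLOC_MIN_SIZE);
          printf("KMALLOC_SHIFT_LOW     = %d\n", KMALLOC_SHIFT_LOW);
          printf("ARCH_KMALLOC_MINALIGN = %d\n", ARCH_KMALLOC_MINALIGN);
          return 0;
  }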