diff options
author | Paul Mundt <lethal@linux-sh.org> | 2007-11-27 01:57:30 -0500 |
---|---|---|
committer | Paul Mundt <lethal@linux-sh.org> | 2008-01-27 23:18:58 -0500 |
commit | 66d485b45a5493f6a2ca067c6f472e7b2ca342c2 (patch) | |
tree | cda9d1614122fe5183ce2b1f26f630aa0558f01e /include/asm-sh | |
parent | eddeeb32fe303910c58c4e3c27fde4b6f1503350 (diff) |
sh: Bump up ARCH_KMALLOC_MINALIGN for DMA cases.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'include/asm-sh')
-rw-r--r-- | include/asm-sh/page.h | 22 |
1 files changed, 9 insertions, 13 deletions
diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h index 407bf5a14936..bff635a078c8 100644 --- a/include/asm-sh/page.h +++ b/include/asm-sh/page.h | |||
@@ -138,22 +138,18 @@ typedef struct { unsigned long pgd; } pgd_t; | |||
138 | #endif | 138 | #endif |
139 | 139 | ||
140 | /* | 140 | /* |
141 | * Slub defaults to 8-byte alignment, we're only interested in 4. | 141 | * Some drivers need to perform DMA into kmalloc'ed buffers |
142 | * Slab defaults to BYTES_PER_WORD, which ends up being the same anyways. | 142 | * and so we have to increase the kmalloc minalign for this. |
143 | */ | 143 | */ |
144 | #ifdef CONFIG_SUPERH32 | 144 | #define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES |
145 | #define ARCH_KMALLOC_MINALIGN 4 | ||
146 | #define ARCH_SLAB_MINALIGN 4 | ||
147 | #else | ||
148 | /* If gcc inlines memset, it will use st.q instructions. Therefore, we need | ||
149 | kmalloc allocations to be 8-byte aligned. Without this, the alignment | ||
150 | becomes BYTE_PER_WORD i.e. only 4 (since sizeof(long)==sizeof(void*)==4 on | ||
151 | sh64 at the moment). */ | ||
152 | #define ARCH_KMALLOC_MINALIGN 8 | ||
153 | 145 | ||
146 | #ifdef CONFIG_SUPERH64 | ||
154 | /* | 147 | /* |
155 | * We want 8-byte alignment for the slab caches as well, otherwise we have | 148 | * While BYTES_PER_WORD == 4 on the current sh64 ABI, GCC will still |
156 | * the same BYTES_PER_WORD (sizeof(void *)) min align in kmem_cache_create(). | 149 | * happily generate {ld/st}.q pairs, requiring us to have 8-byte |
150 | * alignment to avoid traps. The kmalloc alignment is guaranteed by ||
151 | * virtue of L1_CACHE_BYTES, requiring this to only be special cased | ||
152 | * for slab caches. | ||
157 | */ | 153 | */ |
158 | #define ARCH_SLAB_MINALIGN 8 | 154 | #define ARCH_SLAB_MINALIGN 8 |
159 | #endif | 155 | #endif |