Diffstat (limited to 'include/asm-sh/page.h')
-rw-r--r--   include/asm-sh/page.h | 22 +++++++++-------------
1 file changed, 9 insertions(+), 13 deletions(-)
diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h
index 407bf5a14936..bff635a078c8 100644
--- a/include/asm-sh/page.h
+++ b/include/asm-sh/page.h
@@ -138,22 +138,18 @@ typedef struct { unsigned long pgd; } pgd_t;
 #endif
 
 /*
- * Slub defaults to 8-byte alignment, we're only interested in 4.
- * Slab defaults to BYTES_PER_WORD, which ends up being the same anyways.
+ * Some drivers need to perform DMA into kmalloc'ed buffers
+ * and so we have to increase the kmalloc minalign for this.
  */
-#ifdef CONFIG_SUPERH32
-#define ARCH_KMALLOC_MINALIGN 4
-#define ARCH_SLAB_MINALIGN 4
-#else
-/* If gcc inlines memset, it will use st.q instructions. Therefore, we need
-   kmalloc allocations to be 8-byte aligned. Without this, the alignment
-   becomes BYTE_PER_WORD i.e. only 4 (since sizeof(long)==sizeof(void*)==4 on
-   sh64 at the moment). */
-#define ARCH_KMALLOC_MINALIGN 8
+#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
 
+#ifdef CONFIG_SUPERH64
 /*
- * We want 8-byte alignment for the slab caches as well, otherwise we have
- * the same BYTES_PER_WORD (sizeof(void *)) min align in kmem_cache_create().
+ * While BYTES_PER_WORD == 4 on the current sh64 ABI, GCC will still
+ * happily generate {ld/st}.q pairs, requiring us to have 8-byte
+ * alignment to avoid traps. The kmalloc alignment is gauranteed by
+ * virtue of L1_CACHE_BYTES, requiring this to only be special cased
+ * for slab caches.
  */
 #define ARCH_SLAB_MINALIGN 8
 #endif
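
The reasoning for raising ARCH_KMALLOC_MINALIGN to L1_CACHE_BYTES is easiest to see from the consumer side. Below is a minimal, hypothetical driver fragment (not part of this patch; the function and device names are invented, and error handling is trimmed) that DMAs into a kmalloc'ed buffer. On a non-coherent CPU like SH, the streaming DMA API flushes and invalidates whole cache lines, so a buffer that did not start on a cache-line boundary could share a line with an unrelated allocation, and the invalidate done for DMA_FROM_DEVICE could then discard that neighbour's data.

    #include <linux/slab.h>
    #include <linux/dma-mapping.h>

    static int example_rx(struct device *dev, size_t len)
    {
    	dma_addr_t handle;
    	void *buf;

    	/*
    	 * With ARCH_KMALLOC_MINALIGN == L1_CACHE_BYTES this allocation
    	 * starts on its own cache line, so the cache maintenance done by
    	 * dma_map_single()/dma_unmap_single() cannot touch data belonging
    	 * to a neighbouring kmalloc object.
    	 */
    	buf = kmalloc(len, GFP_KERNEL);
    	if (!buf)
    		return -ENOMEM;

    	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

    	/* ... the device fills the buffer via DMA ... */

    	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
    	kfree(buf);
    	return 0;
    }

ARCH_SLAB_MINALIGN, by contrast, only matters for kmem_cache_create() users, since kmalloc alignment is already covered by L1_CACHE_BYTES. A sketch with an invented object type: passing 0 for the align argument lets the slab core fall back to the architecture minimum, so on SUPERH64 every object ends up at least 8-byte aligned and the {ld,st}.q instructions GCC may emit for an inlined memset()/memcpy() over the object cannot trap.

    #include <linux/slab.h>
    #include <linux/types.h>
    #include <linux/init.h>

    struct example_obj {
    	u64 timestamp;		/* GCC may touch this with st.q on sh64 */
    	u32 flags;
    };

    static struct kmem_cache *example_cache;

    static int __init example_cache_init(void)
    {
    	/* align == 0: the slab core raises it to ARCH_SLAB_MINALIGN (8). */
    	example_cache = kmem_cache_create("example_obj",
    					  sizeof(struct example_obj),
    					  0, 0, NULL);
    	return example_cache ? 0 : -ENOMEM;
    }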