author     Paul Mundt <lethal@linux-sh.org>    2007-11-10 05:57:58 -0500
committer  Paul Mundt <lethal@linux-sh.org>    2008-01-27 23:18:43 -0500
commit     01fed9311ab8a724283b3f456c12e573cb51d92b
tree       a270222feed305e586c779063df50b1447ef877f /include
parent     9b01bd9ee6408846c0553c03fb4b864353a845c9
sh: Consolidate slab/kmalloc minalign values.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'include')
-rw-r--r--  include/asm-sh/page.h       | 14 ++++++++++++++
-rw-r--r--  include/asm-sh/uaccess_64.h | 12 ------------
2 files changed, 14 insertions(+), 12 deletions(-)
diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h
index d00a8fde7c7f..d0273dbce6be 100644
--- a/include/asm-sh/page.h
+++ b/include/asm-sh/page.h
@@ -157,8 +157,22 @@ typedef struct { unsigned long pgd; } pgd_t;
  * Slub defaults to 8-byte alignment, we're only interested in 4.
  * Slab defaults to BYTES_PER_WORD, which ends up being the same anyways.
  */
+#ifdef CONFIG_SUPERH32
 #define ARCH_KMALLOC_MINALIGN 4
 #define ARCH_SLAB_MINALIGN 4
+#else
+/* If gcc inlines memset, it will use st.q instructions. Therefore, we need
+   kmalloc allocations to be 8-byte aligned. Without this, the alignment
+   becomes BYTE_PER_WORD i.e. only 4 (since sizeof(long)==sizeof(void*)==4 on
+   sh64 at the moment). */
+#define ARCH_KMALLOC_MINALIGN 8
+
+/*
+ * We want 8-byte alignment for the slab caches as well, otherwise we have
+ * the same BYTES_PER_WORD (sizeof(void *)) min align in kmem_cache_create().
+ */
+#define ARCH_SLAB_MINALIGN 8
+#endif
 
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_PAGE_H */
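
[Editorial note, not part of the patch] st.q is the SH-5 quadword (8-byte) store; like other SH memory operations it traps on an address that is not naturally aligned, which is why the comment above demands 8-byte kmalloc alignment once gcc inlines memset with quadword stores. The following is a minimal userspace sketch of that arithmetic; the helper name and example addresses are made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* st.q stores 8 bytes and requires an 8-byte-aligned address. */
static int ok_for_stq(uintptr_t addr)
{
	return (addr % 8) == 0;
}

int main(void)
{
	/* With ARCH_KMALLOC_MINALIGN of only 4, kmalloc() may legally
	 * return an address such as 0x1004: 4-byte aligned, but an
	 * invalid target for a quadword store. Raising the minimum to 8
	 * rules such addresses out. */
	printf("0x1004 ok for st.q? %d\n", ok_for_stq(0x1004)); /* 0 */
	printf("0x1008 ok for st.q? %d\n", ok_for_stq(0x1008)); /* 1 */
	return 0;
}
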
diff --git a/include/asm-sh/uaccess_64.h b/include/asm-sh/uaccess_64.h
index 644c67b65f94..24800a8045ce 100644
--- a/include/asm-sh/uaccess_64.h
+++ b/include/asm-sh/uaccess_64.h
@@ -297,18 +297,6 @@ struct exception_table_entry
 
 #define ARCH_HAS_SEARCH_EXTABLE
 
-/* If gcc inlines memset, it will use st.q instructions. Therefore, we need
-   kmalloc allocations to be 8-byte aligned. Without this, the alignment
-   becomes BYTE_PER_WORD i.e. only 4 (since sizeof(long)==sizeof(void*)==4 on
-   sh64 at the moment). */
-#define ARCH_KMALLOC_MINALIGN 8
-
-/*
- * We want 8-byte alignment for the slab caches as well, otherwise we have
- * the same BYTES_PER_WORD (sizeof(void *)) min align in kmem_cache_create().
- */
-#define ARCH_SLAB_MINALIGN 8
-
 /* Returns 0 if exception not found and fixup.unit otherwise. */
 extern unsigned long search_exception_table(unsigned long addr);
 extern const struct exception_table_entry *search_exception_tables (unsigned long addr);
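
[Editorial note, not part of the patch] The BYTES_PER_WORD fallback the moved comment refers to can be modelled roughly as below. This is a simplified sketch, not the kernel's exact kmem_cache_create() code of that era; the function name and shape are illustrative. Without an ARCH_SLAB_MINALIGN override, a cache's alignment floor falls back to sizeof(void *), which on sh64 at the time was only 4, hence the explicit 8 now living in page.h.

#include <stddef.h>
#include <stdio.h>

#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN sizeof(void *)	/* the BYTES_PER_WORD default */
#endif

/* Illustrative model: the effective alignment of a new cache is the
 * larger of the architecture floor and what the caller asked for. */
static size_t cache_align(size_t requested)
{
	size_t align = ARCH_SLAB_MINALIGN;

	if (requested > align)
		align = requested;
	return align;
}

int main(void)
{
	/* On sh64 without the override the floor is sizeof(void *) == 4,
	 * so a cache created with no alignment request gets only 4. */
	printf("align(0)  = %zu\n", cache_align(0));
	printf("align(16) = %zu\n", cache_align(16));
	return 0;
}
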