author    Martin Fuzzey <mfuzzey@gmail.com>            2009-06-01 04:19:37 -0400
committer Russell King <rmk+kernel@arm.linux.org.uk>   2009-06-02 17:36:15 -0400
commit    eb5f4ca9536ba297c98721ecbbdf41ec5b987bd5 (patch)
tree      fb1b7dcf66f2f4d00b99776968dca1293f74b8c8
parent    d9244b5d2fbfe9fa540024b410047af13ceec90f (diff)
[ARM] 5534/1: kmalloc must return a cache line aligned buffer
Define ARCH_KMALLOC_MINALIGN in asm/cache.h
At the request of Russell, also move ARCH_SLAB_MINALIGN to this file.
Signed-off-by: Martin Fuzzey <mfuzzey@gmail.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-rw-r--r--  arch/arm/include/asm/cache.h | 16 ++++++++++++++++
-rw-r--r--  arch/arm/include/asm/page.h  |  7 -------
2 files changed, 16 insertions(+), 7 deletions(-)
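Why the alignment matters: with the streaming DMA API, the cache lines covering a buffer are cleaned or invalidated around the transfer. If kmalloc() could return a buffer sharing a cache line with an unrelated object, a CPU access to that neighbour mid-transfer would pull the line back into the cache, and the CPU would later see stale data instead of what the device wrote. Below is a minimal sketch of the pattern this change makes safe; read_sensor() and its device are hypothetical, but the DMA-mapping calls are the standard streaming API:

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical driver fragment (not part of this commit). */
static int read_sensor(struct device *dev, void *out, size_t len)
{
	void *buf = kmalloc(len, GFP_KERNEL);	/* now cache-line aligned */
	dma_addr_t handle;

	if (!buf)
		return -ENOMEM;

	/* Invalidates the cache lines covering buf before the device writes. */
	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle)) {
		kfree(buf);
		return -ENOMEM;
	}

	/* ... program the device and wait for the transfer to complete ... */

	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	/*
	 * Safe to read: no unrelated object shares buf's cache lines, so
	 * nothing could have re-fetched them while the device was writing.
	 */
	memcpy(out, buf, len);
	kfree(buf);
	return 0;
}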
diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
index cb7a9e97fd7e..feaa75f0013e 100644
--- a/arch/arm/include/asm/cache.h
+++ b/arch/arm/include/asm/cache.h
@@ -7,4 +7,20 @@
 #define L1_CACHE_SHIFT		5
 #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
 
+/*
+ * Memory returned by kmalloc() may be used for DMA, so we must make
+ * sure that all such allocations are cache aligned. Otherwise,
+ * unrelated code may cause parts of the buffer to be read into the
+ * cache before the transfer is done, causing old data to be seen by
+ * the CPU.
+ */
+#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES
+
+/*
+ * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers.
+ */
+#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
+#define ARCH_SLAB_MINALIGN 8
+#endif
+
 #endif
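The effect is easy to check at runtime. This throwaway test module is not from the patch; IS_ALIGNED() and the module boilerplate are standard kernel helpers:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>

/* Hypothetical sanity check: every kmalloc() result should now honour
 * ARCH_KMALLOC_MINALIGN, even for odd request sizes. */
static int __init kmalloc_align_test_init(void)
{
	void *p = kmalloc(13, GFP_KERNEL);	/* deliberately odd size */

	if (!p)
		return -ENOMEM;
	WARN_ON(!IS_ALIGNED((unsigned long)p, ARCH_KMALLOC_MINALIGN));
	kfree(p);
	return 0;
}
module_init(kmalloc_align_test_init);
MODULE_LICENSE("GPL");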
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index e6eb8a67b807..7b522770f29d 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -202,13 +202,6 @@ typedef struct page *pgtable_t;
 	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
 	 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
-/*
- * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers.
- */
-#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
-#define ARCH_SLAB_MINALIGN 8
-#endif
-
 #include <asm-generic/page.h>
 
 #endif
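The ARCH_SLAB_MINALIGN block moves verbatim: under the ARM EABI, 64-bit types such as u64 must be 8-byte aligned (they may be accessed with ldrd/strd), so any slab object embedding one needs an 8-byte-aligned start. A hypothetical cache that relies on this rather than passing an explicit align argument:

#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical record type; 'stamp' requires 8-byte alignment under EABI. */
struct rec {
	u64 stamp;
	u32 flags;
};

static struct kmem_cache *rec_cache;

static int __init rec_cache_init(void)
{
	/*
	 * The align argument is 0 here: the allocator still rounds it up to
	 * ARCH_SLAB_MINALIGN (8 with CONFIG_AEABI on ARMv5 and above).
	 */
	rec_cache = kmem_cache_create("rec", sizeof(struct rec), 0, 0, NULL);
	return rec_cache ? 0 : -ENOMEM;
}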
