author | Russell King <rmk@dyn-67.arm.linux.org.uk> | 2008-11-27 07:42:48 -0500
---|---|---
committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2008-11-27 07:42:48 -0500
commit | f412b09f4ed7c57f5b8935ed7d6fc786f402a629 (patch) |
tree | 34fe1b4b64db4993e9fb21a70812fafed0437870 /arch/arm/include |
parent | 31bccbf39208133415000520c79ebe7b291786df (diff) |
parent | 7f1fd31db158c95418d9cc5690ab60ecc6fb632d (diff) |
Merge branch 'for-rmk' of git://linux-arm.org/linux-2.6 into devel
Diffstat (limited to 'arch/arm/include')

-rw-r--r-- | arch/arm/include/asm/cacheflush.h | 36
-rw-r--r-- | arch/arm/include/asm/hwcap.h | 1

2 files changed, 27 insertions, 10 deletions
```diff
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index de6c59f814a1..85a2514cbffc 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -15,6 +15,7 @@
 
 #include <asm/glue.h>
 #include <asm/shmparam.h>
+#include <asm/cachetype.h>
 
 #define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
 
@@ -296,16 +297,6 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
 #endif
 
 /*
- * flush_cache_vmap() is used when creating mappings (eg, via vmap,
- * vmalloc, ioremap etc) in kernel space for pages.  Since the
- * direct-mappings of these pages may contain cached data, we need
- * to do a full cache flush to ensure that writebacks don't corrupt
- * data placed into these pages via the new mappings.
- */
-#define flush_cache_vmap(start, end)		flush_cache_all()
-#define flush_cache_vunmap(start, end)		flush_cache_all()
-
-/*
  * Copy user data from/to a page which is mapped into a different
  * processes address space.  Really, we want to allow our "user
  * space" model to handle this.
@@ -444,4 +435,29 @@ static inline void flush_ioremap_region(unsigned long phys, void __iomem *virt,
 	dmac_inv_range(start, start + size);
 }
 
+/*
+ * flush_cache_vmap() is used when creating mappings (eg, via vmap,
+ * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
+ * caches, since the direct-mappings of these pages may contain cached
+ * data, we need to do a full cache flush to ensure that writebacks
+ * don't corrupt data placed into these pages via the new mappings.
+ */
+static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+	if (!cache_is_vipt_nonaliasing())
+		flush_cache_all();
+	else
+		/*
+		 * set_pte_at() called from vmap_pte_range() does not
+		 * have a DSB after cleaning the cache line.
+		 */
+		dsb();
+}
+
+static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
+{
+	if (!cache_is_vipt_nonaliasing())
+		flush_cache_all();
+}
+
 #endif
```
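Note (not part of the commit): the rewritten flush_cache_vmap()/flush_cache_vunmap() hooks are not called by drivers directly; they are invoked from the generic vmalloc/vmap code when a kernel mapping is created or torn down. As a rough, hypothetical sketch of the path that exercises them, a minimal module along the lines below allocates a few pages and maps them with vmap(); with this change, on a VIPT non-aliasing cache the mapping step only issues a dsb() instead of a full flush_cache_all(). The module name and page count are illustrative.

```c
/* Hypothetical demo module, not from this commit: exercises the vmap path. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/gfp.h>

static struct page *pages[4];
static void *vaddr;

static int __init vmap_demo_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pages); i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto err;
	}

	/* vmap() ends up calling flush_cache_vmap() on the new mapping */
	vaddr = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		goto err;

	pr_info("vmap_demo: mapped %zu pages at %p\n", ARRAY_SIZE(pages), vaddr);
	return 0;

err:
	while (--i >= 0)
		__free_page(pages[i]);
	return -ENOMEM;
}

static void __exit vmap_demo_exit(void)
{
	int i;

	vunmap(vaddr);		/* tear-down path calls flush_cache_vunmap() */
	for (i = 0; i < ARRAY_SIZE(pages); i++)
		__free_page(pages[i]);
}

module_init(vmap_demo_init);
module_exit(vmap_demo_exit);
MODULE_LICENSE("GPL");
```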
```diff
diff --git a/arch/arm/include/asm/hwcap.h b/arch/arm/include/asm/hwcap.h
index 81f4c899a555..bda489f9f017 100644
--- a/arch/arm/include/asm/hwcap.h
+++ b/arch/arm/include/asm/hwcap.h
@@ -16,6 +16,7 @@
 #define HWCAP_IWMMXT	512
 #define HWCAP_CRUNCH	1024
 #define HWCAP_THUMBEE	2048
+#define HWCAP_NEON	4096
 
 #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
 /*
```
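Note (not part of the commit): like the other HWCAP_* flags, the new HWCAP_NEON bit is reported to user space through the ELF auxiliary vector (AT_HWCAP). As a hypothetical illustration, a program built against a libc that provides getauxval() (glibc 2.16 or later) could test it before selecting a NEON-optimised code path:

```c
#include <stdio.h>
#include <sys/auxv.h>

/* Value matches the new definition in arch/arm/include/asm/hwcap.h */
#define HWCAP_NEON 4096

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("NEON %s\n", (hwcap & HWCAP_NEON) ? "supported" : "not supported");
	return 0;
}
```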