| | | |
|---|---|---|
| author | Paul Mundt <lethal@linux-sh.org> | 2007-07-31 04:07:28 -0400 |
| committer | Paul Mundt <lethal@linux-sh.org> | 2007-09-20 22:57:46 -0400 |
| commit | e7bd34a15b85655f24d1b45edbe3bdfebf9d027e (patch) | |
| tree | 051647273266582fe95dcc5cf008534c264be5ae /include/asm-sh | |
| parent | ac919986d7dfc5d1d9f5585521307f222a8ebeaf (diff) | |
sh: Support explicit L1 cache disabling.
This reworks the cache mode configuration in Kconfig, and allows for
explicit selection of write-back/write-through/off configurations.
All of the cache flushing routines are optimized away for the off
case.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'include/asm-sh')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | include/asm-sh/cacheflush.h | 33 |
| -rw-r--r-- | include/asm-sh/page.h | 6 |
| -rw-r--r-- | include/asm-sh/pgtable.h | 3 |

3 files changed, 37 insertions, 5 deletions
```diff
diff --git a/include/asm-sh/cacheflush.h b/include/asm-sh/cacheflush.h
index 07f62ec9ff0c..aa558da08471 100644
--- a/include/asm-sh/cacheflush.h
+++ b/include/asm-sh/cacheflush.h
@@ -1,16 +1,47 @@
 #ifndef __ASM_SH_CACHEFLUSH_H
 #define __ASM_SH_CACHEFLUSH_H
+
 #ifdef __KERNEL__
 
-#include <linux/mm.h>
+#ifdef CONFIG_CACHE_OFF
+/*
+ * Nothing to do when the cache is disabled, initial flush and explicit
+ * disabling is handled at CPU init time.
+ *
+ * See arch/sh/kernel/cpu/init.c:cache_init().
+ */
+#define p3_cache_init()                         do { } while (0)
+#define flush_cache_all()                       do { } while (0)
+#define flush_cache_mm(mm)                      do { } while (0)
+#define flush_cache_dup_mm(mm)                  do { } while (0)
+#define flush_cache_range(vma, start, end)      do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)      do { } while (0)
+#define flush_dcache_page(page)                 do { } while (0)
+#define flush_icache_range(start, end)          do { } while (0)
+#define flush_icache_page(vma,pg)               do { } while (0)
+#define flush_dcache_mmap_lock(mapping)         do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)       do { } while (0)
+#define flush_cache_sigtramp(vaddr)             do { } while (0)
+#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
+#define __flush_wback_region(start, size)       do { (void)(start); } while (0)
+#define __flush_purge_region(start, size)       do { (void)(start); } while (0)
+#define __flush_invalidate_region(start, size)  do { (void)(start); } while (0)
+#else
 #include <asm/cpu/cacheflush.h>
 
+/*
+ * Consistent DMA requires that the __flush_xxx() primitives must be set
+ * for any of the enabled non-coherent caches (most of the UP CPUs),
+ * regardless of PIPT or VIPT cache configurations.
+ */
+
 /* Flush (write-back only) a region (smaller than a page) */
 extern void __flush_wback_region(void *start, int size);
 /* Flush (write-back & invalidate) a region (smaller than a page) */
 extern void __flush_purge_region(void *start, int size);
 /* Flush (invalidate only) a region (smaller than a page) */
 extern void __flush_invalidate_region(void *start, int size);
+#endif
 
 #define flush_cache_vmap(start, end)            flush_cache_all()
 #define flush_cache_vunmap(start, end)          flush_cache_all()
```
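The `CONFIG_CACHE_OFF` branch above is what makes the commit message's claim concrete: every flush interface becomes an empty statement, so callers compile down to nothing. A minimal sketch of a hypothetical caller (the function and its use of `memcpy()` are illustrative only, not part of the patch):

```c
#include <linux/types.h>	/* size_t */
#include <linux/string.h>	/* memcpy() */
#include <asm/cacheflush.h>	/* the macros/externs shown above */

/*
 * Hypothetical helper, for illustration only: copy code into a buffer
 * and make sure the instruction cache sees it.
 */
static void copy_and_sync_icache(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);

	/*
	 * With CONFIG_CACHE_OFF this expands to do { } while (0) and the
	 * compiler emits no code for it; otherwise it resolves to the real
	 * SH implementation pulled in via <asm/cpu/cacheflush.h>.
	 */
	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
}
```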
```diff
diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h
index 6bc9bba10105..48b718e7455c 100644
--- a/include/asm-sh/page.h
+++ b/include/asm-sh/page.h
@@ -70,14 +70,14 @@ extern void clear_page_nommu(void *to);
 extern void copy_page_nommu(void *to, void *from);
 #endif
 
-#if defined(CONFIG_MMU) && (defined(CONFIG_CPU_SH4) || \
-	defined(CONFIG_SH7705_CACHE_32KB))
+#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) && \
+	(defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB))
 struct page;
 extern void clear_user_page(void *to, unsigned long address, struct page *pg);
 extern void copy_user_page(void *to, void *from, unsigned long address, struct page *pg);
 extern void __clear_user_page(void *to, void *orig_to);
 extern void __copy_user_page(void *to, void *from, void *orig_to);
-#elif defined(CONFIG_CPU_SH2) || defined(CONFIG_CPU_SH3) || !defined(CONFIG_MMU)
+#else
 #define clear_user_page(page, vaddr, pg)	clear_page(page)
 #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
 #endif
```
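With the cache disabled there is no D-cache aliasing to manage, so the page.h change folds all remaining configurations into the plain `clear_page()`/`copy_page()` path. A hedged sketch of how a caller resolves under `CONFIG_CACHE_OFF` (hypothetical function, not from the patch):

```c
#include <asm/page.h>

struct page;

/*
 * Hypothetical fault-path helper: zero a freshly allocated page that will
 * be mapped at user address 'uaddr'.
 */
static void zero_new_user_page(void *kaddr, unsigned long uaddr,
			       struct page *pg)
{
	/*
	 * Under CONFIG_CACHE_OFF (or any other #else configuration) this is
	 * just clear_page(kaddr); the alias-aware SH4/SH7705 variant is only
	 * compiled in when a cache is actually enabled.
	 */
	clear_user_page(kaddr, uaddr, pg);
}
```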
```diff
diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h
index e3fae12c0e49..54ad5037fe40 100644
--- a/include/asm-sh/pgtable.h
+++ b/include/asm-sh/pgtable.h
@@ -563,7 +563,8 @@ struct mm_struct;
 extern unsigned int kobjsize(const void *objp);
 #endif /* !CONFIG_MMU */
 
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
+#if !defined(CONFIG_CACHE_OFF) && (defined(CONFIG_CPU_SH4) || \
+	defined(CONFIG_SH7705_CACHE_32KB))
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 extern pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
 #endif
```
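Dropping `__HAVE_ARCH_PTEP_GET_AND_CLEAR` for the cache-off case means the generic helper takes over, and that helper does not need to write back any cached aliases. Roughly what that fallback looks like, modelled on the asm-generic macro of this era (an assumption for illustration, not part of this patch):

```c
/*
 * Approximation of the generic ptep_get_and_clear() that applies once the
 * SH-specific override above is compiled out (the CONFIG_CACHE_OFF case).
 * Modelled on include/asm-generic/pgtable.h of this period; shown as a
 * function for readability, the original is a macro.
 */
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte_clear(mm, addr, ptep);	/* no cache writeback required */
	return pte;
}
```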
