author     Paul Mundt <lethal@linux-sh.org>    2006-09-27 02:29:18 -0400
committer  Paul Mundt <lethal@linux-sh.org>    2006-09-27 02:29:18 -0400
commit     c470662854ff94d44bf8c192cefac3efa33db676 (patch)
tree       a54b42ea6bec224f8a211cd44355d2ee05d29de6 /include/asm-sh/cpu-sh3/cacheflush.h
parent     d7cdc9e8ac82c43fdcd4fde6b5b53d2dcba7f707 (diff)
sh: Fixup SHMLBA definition for SH7705.

We need this set to something sensible anywhere where we have an
aliasing dcache.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
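For context, "something sensible" means at least the dcache alias span: shared mappings must attach at addresses whose alias bits agree, or two virtual mappings of one physical page index different cache lines. A minimal sketch of the idea, assuming the CACHE_ALIAS and 4KB-page values from the header below; the SHMLBA value the commit actually picks is outside this diffstat-limited view:

/* Sketch only, not part of the hunk below.  With CACHE_ALIAS =
 * 0x00001000 and 4KB pages the alias span is two pages, so a
 * sensible SHMLBA covers all alias bits plus the page offset: */
#define SHMLBA	((CACHE_ALIAS | (PAGE_SIZE - 1)) + 1)	/* 0x2000 here */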
Diffstat (limited to 'include/asm-sh/cpu-sh3/cacheflush.h')

-rw-r--r--  include/asm-sh/cpu-sh3/cacheflush.h | 48
1 file changed, 21 insertions(+), 27 deletions(-)
diff --git a/include/asm-sh/cpu-sh3/cacheflush.h b/include/asm-sh/cpu-sh3/cacheflush.h
index db0cb071ea8e..97f5a64c2ab8 100644
--- a/include/asm-sh/cpu-sh3/cacheflush.h
+++ b/include/asm-sh/cpu-sh3/cacheflush.h
@@ -35,47 +35,41 @@
 /* 32KB cache, 4kb PAGE sizes need to check bit 12 */
 #define CACHE_ALIAS 0x00001000
 
-extern void flush_cache_all(void);
-extern void flush_cache_mm(struct mm_struct *mm);
-extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
-			      unsigned long end);
-extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
-extern void flush_dcache_page(struct page *pg);
-extern void flush_icache_range(unsigned long start, unsigned long end);
-extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
-
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-
-/* SH3 has unified cache so no special action needed here */
-#define flush_cache_sigtramp(vaddr) do { } while (0)
-#define flush_page_to_ram(page) do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
-
-#define p3_cache_init() do { } while (0)
-
 #define PG_mapped PG_arch_1
 
-/* We provide our own get_unmapped_area to avoid cache alias issue */
-#define HAVE_ARCH_UNMAPPED_AREA
-
+void flush_cache_all(void);
+void flush_cache_mm(struct mm_struct *mm);
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+		       unsigned long end);
+void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
+void flush_dcache_page(struct page *pg);
+void flush_icache_range(unsigned long start, unsigned long end);
+void flush_icache_page(struct vm_area_struct *vma, struct page *page);
 #else
-
 #define flush_cache_all() do { } while (0)
 #define flush_cache_mm(mm) do { } while (0)
 #define flush_cache_range(vma, start, end) do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
 #define flush_dcache_page(page) do { } while (0)
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
 #define flush_icache_range(start, end) do { } while (0)
 #define flush_icache_page(vma,pg) do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
+#endif
+
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+
+/* SH3 has unified cache so no special action needed here */
 #define flush_cache_sigtramp(vaddr) do { } while (0)
+#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
 
 #define p3_cache_init() do { } while (0)
 
-#endif
+/*
+ * We provide our own get_unmapped_area to avoid cache aliasing issues
+ * on SH7705 with a 32KB cache, and to page align addresses in the
+ * non-aliasing case.
+ */
+#define HAVE_ARCH_UNMAPPED_AREA
 
 #endif /* __ASM_CPU_SH3_CACHEFLUSH_H */
 
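The HAVE_ARCH_UNMAPPED_AREA define added above is paired with an arch_get_unmapped_area in the sh arch code that colour-aligns shared mappings. A self-contained user-space sketch of that alignment step, with PAGE_SHIFT and an SHMLBA of two pages assumed from the 32KB-cache/4KB-page configuration in this header; the function name colour_align is illustrative, not the kernel's:

#include <stdio.h>

/* Values assumed for illustration (SH7705: 32KB cache, 4KB pages,
 * alias bit 12 => two-page alias span). */
#define PAGE_SHIFT	12
#define SHMLBA		0x2000UL

/* Round addr up to an SHMLBA boundary, then add the file offset's
 * position within the alias span, so every mapping of a given file
 * page lands on the same cache colour (same bit 12). */
static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	return ((addr + SHMLBA - 1) & ~(SHMLBA - 1)) +
	       ((pgoff << PAGE_SHIFT) & (SHMLBA - 1));
}

int main(void)
{
	/* Both candidate addresses map file page 3: each result has
	 * bit 12 set, so the two mappings index the same dcache lines
	 * and cannot alias. */
	printf("%#lx\n", colour_align(0x10001234UL, 3));	/* 0x10003000 */
	printf("%#lx\n", colour_align(0x2000f000UL, 3));	/* 0x20011000 */
	return 0;
}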