Diffstat (limited to 'include/asm-sh/cpu-sh3/cacheflush.h')
-rw-r--r--  include/asm-sh/cpu-sh3/cacheflush.h  48  +++++++++++++++++++++---------------------------
1 file changed, 21 insertions(+), 27 deletions(-)
diff --git a/include/asm-sh/cpu-sh3/cacheflush.h b/include/asm-sh/cpu-sh3/cacheflush.h
index db0cb071ea8e..97f5a64c2ab8 100644
--- a/include/asm-sh/cpu-sh3/cacheflush.h
+++ b/include/asm-sh/cpu-sh3/cacheflush.h
@@ -35,47 +35,41 @@
 /* 32KB cache, 4kb PAGE sizes need to check bit 12 */
 #define CACHE_ALIAS	0x00001000
 
-extern void flush_cache_all(void);
-extern void flush_cache_mm(struct mm_struct *mm);
-extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
-			      unsigned long end);
-extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
-extern void flush_dcache_page(struct page *pg);
-extern void flush_icache_range(unsigned long start, unsigned long end);
-extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
-
-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
-
-/* SH3 has unified cache so no special action needed here */
-#define flush_cache_sigtramp(vaddr)		do { } while (0)
-#define flush_page_to_ram(page)			do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
-
-#define p3_cache_init()				do { } while (0)
-
 #define PG_mapped	PG_arch_1
 
-/* We provide our own get_unmapped_area to avoid cache alias issue */
-#define HAVE_ARCH_UNMAPPED_AREA
-
+void flush_cache_all(void);
+void flush_cache_mm(struct mm_struct *mm);
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+		       unsigned long end);
+void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
+void flush_dcache_page(struct page *pg);
+void flush_icache_range(unsigned long start, unsigned long end);
+void flush_icache_page(struct vm_area_struct *vma, struct page *page);
 #else
-
 #define flush_cache_all()			do { } while (0)
 #define flush_cache_mm(mm)			do { } while (0)
 #define flush_cache_range(vma, start, end)	do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
 #define flush_dcache_page(page)			do { } while (0)
-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 #define flush_icache_range(start, end)		do { } while (0)
 #define flush_icache_page(vma,pg)		do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
+#endif
+
+#define flush_dcache_mmap_lock(mapping)		do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
+
+/* SH3 has unified cache so no special action needed here */
 #define flush_cache_sigtramp(vaddr)		do { } while (0)
+#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
 
 #define p3_cache_init()				do { } while (0)
 
-#endif
+/*
+ * We provide our own get_unmapped_area to avoid cache aliasing issues
+ * on SH7705 with a 32KB cache, and to page align addresses in the
+ * non-aliasing case.
+ */
+#define HAVE_ARCH_UNMAPPED_AREA
 
 #endif /* __ASM_CPU_SH3_CACHEFLUSH_H */
 
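A note on the CACHE_ALIAS constant in the hunk above. On a virtually
indexed cache whose per-way size exceeds the page size (with a 32KB
cache and 4KB pages, presumably an 8KB way here), the cache index needs
one bit beyond the page offset; that is bit 12, which is exactly what
CACHE_ALIAS = 0x00001000 encodes. A minimal user-space sketch of the
check the flush routines perform, assuming this cache geometry (the
helper name alias_conflict is ours, not the kernel's):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define CACHE_ALIAS	0x00001000UL	/* bit 12, as in the header above */

/* Two virtual mappings of the same physical page collide in the cache
 * ("alias") when they disagree in the index bit above the page offset. */
static int alias_conflict(unsigned long va1, unsigned long va2)
{
	return ((va1 ^ va2) & CACHE_ALIAS) != 0;
}

int main(void)
{
	unsigned long a = 0x10002000UL;
	unsigned long b = a + PAGE_SIZE;	/* one page apart: bit 12 flips */

	printf("%#lx vs %#lx: %s\n", a, b,
	       alias_conflict(a, b) ? "alias" : "same cache colour");
	return 0;
}

When the bit differs, the same physical line can sit in two cache sets
at once, which is why the SH7705 flush_* declarations above point at
real implementations instead of compiling away to no-ops.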
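HAVE_ARCH_UNMAPPED_AREA, re-added at the end of the hunk, tells the
generic mm code that this architecture supplies its own
arch_get_unmapped_area(), so mmap() avoids handing out conflicting
colours in the first place. A hedged sketch of the colour-alignment
idea behind that, under the same assumptions as above (colour_align and
SHM_ALIGN_MASK are illustrative names, not the kernel's actual code):

#define PAGE_SIZE	4096UL
#define PAGE_SHIFT	12
#define CACHE_ALIAS	0x00001000UL
#define SHM_ALIGN_MASK	(CACHE_ALIAS | (PAGE_SIZE - 1))

/* Round a candidate address up to an alias boundary, then offset it so
 * the alias bit matches the one implied by the file offset in pages.
 * Shared mappings of the same file page then always get one colour. */
static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	unsigned long base   = (addr + SHM_ALIGN_MASK) & ~SHM_ALIGN_MASK;
	unsigned long colour = (pgoff << PAGE_SHIFT) & SHM_ALIGN_MASK;

	return base + colour;
}

With 4KB pages, bit 0 of pgoff becomes bit 12 of the returned address,
so every shared mapping of a given file page agrees in the CACHE_ALIAS
bit and the aliasing checked above cannot arise for such mappings.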