Diffstat (limited to 'include/asm-sh/cpu-sh4/cacheflush.h')
-rw-r--r--  include/asm-sh/cpu-sh4/cacheflush.h  36
1 file changed, 12 insertions(+), 24 deletions(-)
diff --git a/include/asm-sh/cpu-sh4/cacheflush.h b/include/asm-sh/cpu-sh4/cacheflush.h
index f323567e085f..515fd574267c 100644
--- a/include/asm-sh/cpu-sh4/cacheflush.h
+++ b/include/asm-sh/cpu-sh4/cacheflush.h
@@ -16,40 +16,29 @@
  * caching; in which case they're only semi-broken),
  * so we need them.
  */
-
-/* Page is 4K, OC size is 16K, there are four lines. */
-#define CACHE_ALIAS 0x00003000
-
-struct page;
-struct mm_struct;
-struct vm_area_struct;
-
-extern void flush_cache_all(void);
-extern void flush_cache_mm(struct mm_struct *mm);
-extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
-			      unsigned long end);
-extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
-extern void flush_dcache_page(struct page *pg);
+void flush_cache_all(void);
+void flush_cache_mm(struct mm_struct *mm);
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+		       unsigned long end);
+void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
+		      unsigned long pfn);
+void flush_dcache_page(struct page *pg);
 
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 
-extern void flush_icache_range(unsigned long start, unsigned long end);
-extern void flush_cache_sigtramp(unsigned long addr);
-extern void flush_icache_user_range(struct vm_area_struct *vma,
-				    struct page *page, unsigned long addr,
-				    int len);
+void flush_icache_range(unsigned long start, unsigned long end);
+void flush_cache_sigtramp(unsigned long addr);
+void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+			     unsigned long addr, int len);
 
 #define flush_icache_page(vma,pg)	do { } while (0)
 
 /* Initialization of P3 area for copy_user_page */
-extern void p3_cache_init(void);
+void p3_cache_init(void);
 
 #define PG_mapped	PG_arch_1
 
-/* We provide our own get_unmapped_area to avoid cache alias issue */
-#define HAVE_ARCH_UNMAPPED_AREA
-
 #ifdef CONFIG_MMU
 extern int remap_area_pages(unsigned long addr, unsigned long phys_addr,
 			    unsigned long size, unsigned long flags);
@@ -61,4 +50,3 @@ static inline int remap_area_pages(unsigned long addr, unsigned long phys_addr,
 }
 #endif /* CONFIG_MMU */
 #endif /* __ASM_CPU_SH4_CACHEFLUSH_H */
-
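
For context on the dropped CACHE_ALIAS constant: the removed comment notes that pages are 4K while the SH-4 operand cache is 16K, giving four page-sized cache colours, so CACHE_ALIAS masks the virtual-address bits (bits 12-13) that pick the colour but are not fixed by the page offset. A minimal, hypothetical user-space sketch of that arithmetic (illustration only, not part of the header; the macro names here are made up):

#include <stdio.h>

#define SKETCH_PAGE_SIZE 0x1000UL  /* 4K page */
#define SKETCH_OC_SIZE   0x4000UL  /* 16K operand cache */

int main(void)
{
	/* Colour-selecting bits: cache index bits minus the in-page offset bits. */
	unsigned long cache_alias = (SKETCH_OC_SIZE - 1) & ~(SKETCH_PAGE_SIZE - 1);

	printf("colours     = %lu\n", SKETCH_OC_SIZE / SKETCH_PAGE_SIZE); /* 4 */
	printf("CACHE_ALIAS = 0x%08lx\n", cache_alias);                   /* 0x00003000 */
	return 0;
}

This reproduces the 0x00003000 value that the patch removes from the header; the patch itself only drops the constant (along with the struct forward declarations, the redundant extern specifiers, and HAVE_ARCH_UNMAPPED_AREA) rather than changing the cache geometry.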