author     Chris Zankel <czankel@tensilica.com>       2005-06-24 01:01:26 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-06-24 03:05:22 -0400
commit     9a8fd5589902153a134111ed7a40f9cca1f83254 (patch)
tree       6f7a06de25bdf0b2d94623794c2cbbc66b5a77f6 /include/asm-xtensa/cacheflush.h
parent     3f65ce4d141e435e54c20ed2379d983d362a2cb5 (diff)
[PATCH] xtensa: Architecture support for Tensilica Xtensa Part 6
The attached patch provides part 6 of an architecture implementation for the
Tensilica Xtensa CPU series.
Signed-off-by: Chris Zankel <chris@zankel.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/asm-xtensa/cacheflush.h')
-rw-r--r--   include/asm-xtensa/cacheflush.h   122
1 file changed, 122 insertions(+), 0 deletions(-)
diff --git a/include/asm-xtensa/cacheflush.h b/include/asm-xtensa/cacheflush.h
new file mode 100644
index 000000000000..44a36e087844
--- /dev/null
+++ b/include/asm-xtensa/cacheflush.h
@@ -0,0 +1,122 @@
/*
 * include/asm-xtensa/cacheflush.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_CACHEFLUSH_H
#define _XTENSA_CACHEFLUSH_H

#ifdef __KERNEL__

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/page.h>

/*
 * flush and invalidate data cache, invalidate instruction cache:
 *
 *	__flush_invalidate_cache_all()
 *	__flush_invalidate_cache_range(from,size)
 *
 * invalidate data or instruction cache:
 *
 *	__invalidate_icache_all()
 *	__invalidate_icache_page(adr)
 *	__invalidate_dcache_page(adr)
 *	__invalidate_icache_range(from,size)
 *	__invalidate_dcache_range(from,size)
 *
 * flush data cache:
 *
 *	__flush_dcache_page(adr)
 *
 * flush and invalidate data cache:
 *
 *	__flush_invalidate_dcache_all()
 *	__flush_invalidate_dcache_page(adr)
 *	__flush_invalidate_dcache_range(from,size)
 */

extern void __flush_invalidate_cache_all(void);
extern void __flush_invalidate_cache_range(unsigned long, unsigned long);
extern void __flush_invalidate_dcache_all(void);
extern void __invalidate_icache_all(void);

extern void __invalidate_dcache_page(unsigned long);
extern void __invalidate_icache_page(unsigned long);
extern void __invalidate_icache_range(unsigned long, unsigned long);
extern void __invalidate_dcache_range(unsigned long, unsigned long);

#if XCHAL_DCACHE_IS_WRITEBACK
extern void __flush_dcache_page(unsigned long);
extern void __flush_invalidate_dcache_page(unsigned long);
extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
#else
# define __flush_dcache_page(p)			do { } while (0)
# define __flush_invalidate_dcache_page(p)	do { } while (0)
# define __flush_invalidate_dcache_range(p,s)	do { } while (0)
#endif

/*
 * We have physically tagged caches - nothing to do here -
 * unless we have cache aliasing.
 *
 * Pages can get remapped. Because this might change the 'color' of that page,
 * we have to flush the cache before the PTE is changed.
 * (see also Documentation/cachetlb.txt)
 */

#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK

#define flush_cache_all()		__flush_invalidate_cache_all()
#define flush_cache_mm(mm)		__flush_invalidate_cache_all()

#define flush_cache_vmap(start,end)	__flush_invalidate_cache_all()
#define flush_cache_vunmap(start,end)	__flush_invalidate_cache_all()

extern void flush_dcache_page(struct page*);

extern void flush_cache_range(struct vm_area_struct*, ulong, ulong);
extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned long);

#else

#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)

#define flush_cache_vmap(start,end)		do { } while (0)
#define flush_cache_vunmap(start,end)		do { } while (0)

#define flush_dcache_page(page)			do { } while (0)

#define flush_cache_page(vma,addr,pfn)		do { } while (0)
#define flush_cache_range(vma,start,end)	do { } while (0)

#endif

#define flush_icache_range(start,end) \
	__invalidate_icache_range(start,(end)-(start))

/* This is not required, see Documentation/cachetlb.txt */

#define flush_icache_page(vma,page)		do { } while (0)

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)


#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)

#endif /* __KERNEL__ */

#endif /* _XTENSA_CACHEFLUSH_H */
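
A note on the (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK guard above: one way of the data cache is indexed by virtual address bits, and once a way spans more than a page those index bits reach above PAGE_SHIFT, so two virtual mappings of the same physical page can land in different cache sets (different page "colors") and alias. The sketch below works that arithmetic through with made-up cache parameters; the sizes and the program itself are illustrative only, not the real XCHAL values of any Xtensa core.

#include <stdio.h>

/* Illustrative parameters only -- not the XCHAL_* values of a real core. */
#define DCACHE_SIZE     16384                        /* 16 KiB data cache (assumed)     */
#define DCACHE_WAYS     2                            /* 2-way set associative (assumed) */
#define DCACHE_WAY_SIZE (DCACHE_SIZE / DCACHE_WAYS)  /* bytes indexed within one way    */
#define PAGE_SIZE       4096                         /* 4 KiB pages                     */

int main(void)
{
	/* Number of page "colors": how many distinct cache positions a
	 * physical page can occupy depending on the virtual address it
	 * is mapped at.  More than one color means aliasing is possible
	 * and the flush_* interfaces above must really flush. */
	int colors = DCACHE_WAY_SIZE > PAGE_SIZE ? DCACHE_WAY_SIZE / PAGE_SIZE : 1;

	printf("way size %d, page size %d -> %d color(s)\n",
	       DCACHE_WAY_SIZE, PAGE_SIZE, colors);
	return 0;
}

With these numbers the way size is 8192, giving two colors, so the guard is true and the aliasing branch of the header is compiled in; a core whose way size fits within a page, or whose data cache is write-through, falls through to the no-op branch instead.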
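For flush_icache_range(), the usual calling pattern described in Documentation/cachetlb.txt is: write instructions into memory through the data side, then flush the range before jumping to it. A minimal sketch of that pattern follows; the publish_code() helper and its parameters are hypothetical and not part of this patch.

#include <linux/string.h>
#include <asm/cacheflush.h>

/* Hypothetical helper, for illustration only: copy freshly generated
 * instructions into 'dst' and make the range safe to execute.  On
 * xtensa, flush_icache_range(start, end) expands to
 * __invalidate_icache_range(start, end - start). */
static void publish_code(void *dst, const void *src, unsigned long len)
{
	unsigned long start = (unsigned long)dst;

	memcpy(dst, src, len);                    /* stores go through the data cache */
	flush_icache_range(start, start + len);   /* drop stale instruction-cache lines */
}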