path: root/arch/xtensa/include/asm/cacheflush.h
author    Chris Zankel <chris@zankel.net>    2008-11-06 09:40:46 -0500
committer Chris Zankel <chris@zankel.net>    2008-11-06 13:25:09 -0500
commit    367b8112fe2ea5c39a7bb4d263dcdd9b612fae18 (patch)
tree      9f3349189718dd2c5678faf0ab38f389786b6925 /arch/xtensa/include/asm/cacheflush.h
parent    206ead28377fee86b129637edada8c77816cc0d6 (diff)
xtensa: move header files to arch/xtensa/include
Move all header files for xtensa to arch/xtensa/include, and move the platform and variant header files to the appropriate arch/xtensa/platforms/ and arch/xtensa/variants/ directories. Moving the files also gets rid of all uses of symlinks in the Makefile. This has already been completed for the majority of the architectures; xtensa is one of the six still missing.

Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: Chris Zankel <chris@zankel.net>
Diffstat (limited to 'arch/xtensa/include/asm/cacheflush.h')
-rw-r--r--    arch/xtensa/include/asm/cacheflush.h    155
1 file changed, 155 insertions, 0 deletions
diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h
new file mode 100644
index 000000000000..94c4c53a099e
--- /dev/null
+++ b/arch/xtensa/include/asm/cacheflush.h
@@ -0,0 +1,155 @@
/*
 * include/asm-xtensa/cacheflush.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) 2001 - 2007 Tensilica Inc.
 */

#ifndef _XTENSA_CACHEFLUSH_H
#define _XTENSA_CACHEFLUSH_H

#ifdef __KERNEL__

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/page.h>

/*
 * Low-level routines for cache flushing.
 *
 * invalidate data or instruction cache:
 *
 * __invalidate_icache_all()
 * __invalidate_icache_page(adr)
 * __invalidate_dcache_page(adr)
 * __invalidate_icache_range(from,size)
 * __invalidate_dcache_range(from,size)
 *
 * flush data cache:
 *
 * __flush_dcache_page(adr)
 *
 * flush and invalidate data cache:
 *
 * __flush_invalidate_dcache_all()
 * __flush_invalidate_dcache_page(adr)
 * __flush_invalidate_dcache_range(from,size)
 *
 * specials for cache aliasing:
 *
 * __flush_invalidate_dcache_page_alias(vaddr,paddr)
 * __invalidate_icache_page_alias(vaddr,paddr)
 */

extern void __invalidate_dcache_all(void);
extern void __invalidate_icache_all(void);
extern void __invalidate_dcache_page(unsigned long);
extern void __invalidate_icache_page(unsigned long);
extern void __invalidate_icache_range(unsigned long, unsigned long);
extern void __invalidate_dcache_range(unsigned long, unsigned long);


#if XCHAL_DCACHE_IS_WRITEBACK
extern void __flush_invalidate_dcache_all(void);
extern void __flush_dcache_page(unsigned long);
extern void __flush_dcache_range(unsigned long, unsigned long);
extern void __flush_invalidate_dcache_page(unsigned long);
extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
#else
# define __flush_dcache_range(p,s)		do { } while(0)
# define __flush_dcache_page(p)			do { } while(0)
# define __flush_invalidate_dcache_page(p)	__invalidate_dcache_page(p)
# define __flush_invalidate_dcache_range(p,s)	__invalidate_dcache_range(p,s)
#endif

#if (DCACHE_WAY_SIZE > PAGE_SIZE)
extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
#endif
#if (ICACHE_WAY_SIZE > PAGE_SIZE)
extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
#else
# define __invalidate_icache_page_alias(v,p)	do { } while(0)
#endif

/*
 * We have physically tagged caches - nothing to do here -
 * unless we have cache aliasing.
 *
 * Pages can get remapped. Because this might change the 'color' of that page,
 * we have to flush the cache before the PTE is changed.
 * (see also Documentation/cachetlb.txt)
 */

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

#define flush_cache_all()						\
	do {								\
		__flush_invalidate_dcache_all();			\
		__invalidate_icache_all();				\
	} while (0)

#define flush_cache_mm(mm)		flush_cache_all()
#define flush_cache_dup_mm(mm)		flush_cache_mm(mm)

#define flush_cache_vmap(start,end)	flush_cache_all()
#define flush_cache_vunmap(start,end)	flush_cache_all()

extern void flush_dcache_page(struct page*);
extern void flush_cache_range(struct vm_area_struct*, ulong, ulong);
extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned long);

#else

#define flush_cache_all()				do { } while (0)
#define flush_cache_mm(mm)				do { } while (0)
#define flush_cache_dup_mm(mm)				do { } while (0)

#define flush_cache_vmap(start,end)			do { } while (0)
#define flush_cache_vunmap(start,end)			do { } while (0)

#define flush_dcache_page(page)				do { } while (0)

#define flush_cache_page(vma,addr,pfn)			do { } while (0)
#define flush_cache_range(vma,start,end)		do { } while (0)

#endif

/* Ensure consistency between data and instruction cache. */
#define flush_icache_range(start,end)					\
	do {								\
		__flush_dcache_range(start, (end) - (start));		\
		__invalidate_icache_range(start,(end) - (start));	\
	} while (0)

/* This is not required, see Documentation/cachetlb.txt */
#define flush_icache_page(vma,page)			do { } while (0)

#define flush_dcache_mmap_lock(mapping)			do { } while (0)
#define flush_dcache_mmap_unlock(mapping)		do { } while (0)

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

extern void copy_to_user_page(struct vm_area_struct*, struct page*,
			      unsigned long, void*, const void*, unsigned long);
extern void copy_from_user_page(struct vm_area_struct*, struct page*,
				unsigned long, void*, const void*, unsigned long);

#else

#define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
	do {								\
		memcpy(dst, src, len);					\
		__flush_dcache_range((unsigned long) dst, len);		\
		__invalidate_icache_range((unsigned long) dst, len);	\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len)		\
	memcpy(dst, src, len)

#endif

#endif /* __KERNEL__ */
#endif /* _XTENSA_CACHEFLUSH_H */
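
The (DCACHE_WAY_SIZE > PAGE_SIZE) guards in the header exist because a virtually indexed cache whose way is larger than a page can hold the same physical page under two different indices ("colors"), depending on the virtual address it was accessed through. A minimal user-space sketch of that color arithmetic, using assumed example sizes (4 KiB pages, 8 KiB way) rather than the parameters of any real Xtensa variant:

#include <stdio.h>

#define EXAMPLE_PAGE_SHIFT	12				/* 4 KiB pages (assumed) */
#define EXAMPLE_WAY_SIZE	(2UL << EXAMPLE_PAGE_SHIFT)	/* 8 KiB way (assumed)   */

/* The index bits above the page offset select the "color" of a mapping. */
static unsigned long cache_color(unsigned long vaddr)
{
	return (vaddr & (EXAMPLE_WAY_SIZE - 1)) >> EXAMPLE_PAGE_SHIFT;
}

int main(void)
{
	/* Two hypothetical virtual mappings of the same physical page. */
	unsigned long a = 0x10001000UL;
	unsigned long b = 0x10002000UL;

	printf("color(a)=%lu color(b)=%lu\n", cache_color(a), cache_color(b));
	return 0;
}

Because the two mappings end up with different colors, the same data can sit in two distinct cache lines; that is why the *_page_alias() helpers take both a virtual and a physical address, and why the comment in the header requires flushing before a PTE change alters a page's color.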
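flush_icache_range() pairs a data-cache writeback with an instruction-cache invalidate so that freshly written code is actually fetched from memory. A kernel-style usage sketch; the install_code() helper is hypothetical and not part of this commit:

#include <linux/string.h>
#include <asm/cacheflush.h>

/* Hypothetical helper: copy generated instructions into dst and make them
 * visible to instruction fetch. The memcpy() goes through the D-cache;
 * flush_icache_range() writes those lines back and drops any stale
 * I-cache lines covering the range. */
static void install_code(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
}

On configurations without a writeback data cache, __flush_dcache_range() is defined away in the header, so the macro reduces to the instruction-cache invalidate alone.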
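copy_to_user_page() is the hook generic mm code uses when it writes into another task's page through a temporary kernel mapping, for example on the access_process_vm()/ptrace path. A rough sketch of such a caller; the poke_user_page() wrapper is hypothetical and page locking/dirtying is omitted:

#include <linux/mm.h>
#include <linux/highmem.h>
#include <asm/cacheflush.h>

/* Hypothetical wrapper: write buf into another task's page at user
 * address uaddr, going through a temporary kernel mapping of the page. */
static void poke_user_page(struct vm_area_struct *vma, struct page *page,
			   unsigned long uaddr, const void *buf,
			   unsigned long len)
{
	void *kaddr = kmap(page);

	/* copy_to_user_page() does the memcpy() plus whatever cache
	 * maintenance the architecture needs so the target task sees,
	 * and can safely execute, the new bytes. */
	copy_to_user_page(vma, page, uaddr,
			  kaddr + (uaddr & ~PAGE_MASK), buf, len);

	kunmap(page);
}

When the data cache can alias, the header declares copy_to_user_page() as an out-of-line function so the implementation can take the user mapping's color into account; without aliasing it reduces to the memcpy() plus the same writeback/invalidate pair used by flush_icache_range().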