author		Oskar Schirmer <os@emlix.com>	2009-06-10 15:58:45 -0400
committer	Chris Zankel <chris@zankel.net>	2009-06-22 05:36:50 -0400
commit		bd974240c9a7c6c560504bf390cd8985a16b68f6 (patch)
tree		fdece95f79c93bfda475c5734da3e247d5f93d99 /arch/xtensa/include
parent		f234012f52a37e48f2330e1ca2df69800e797c3b (diff)
xtensa: cache inquiry and unaligned cache handling functions
The existing xtensa cache handling functions work on page-aligned memory regions only. The functions added here are needed for the s6000 dma engine, which can operate at byte granularity.

Signed-off-by: Oskar Schirmer <os@emlix.com>
Cc: Johannes Weiner <jw@emlix.com>
Cc: Daniel Glockner <dg@emlix.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Chris Zankel <chris@zankel.net>
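As an illustration of what the new helpers are for, a minimal driver-side sketch; the dma_prepare() function and its buffer handling are hypothetical and not part of this commit:

/* Hypothetical usage sketch, not part of this patch: cache maintenance
 * around a byte-granular DMA transfer, using the helpers added below.
 */
static void dma_prepare(void *src, void *dst, u32 len)
{
	/* memory -> device: write dirty source lines back so the engine
	 * reads current data; skipped when the region is not
	 * writeback-cached.
	 */
	if (xtensa_need_flush_dma_source((u32)src))
		flush_dcache_unaligned((u32)src, len);

	/* device -> memory: discard stale destination lines so the CPU
	 * rereads what the engine wrote; the helper writes back the
	 * partially covered edge lines before invalidating them.
	 */
	if (xtensa_need_invalidate_dma_destination((u32)dst))
		invalidate_dcache_unaligned((u32)dst, len);
}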
Diffstat (limited to 'arch/xtensa/include')
-rw-r--r--	arch/xtensa/include/asm/cacheflush.h	95
1 file changed, 95 insertions(+), 0 deletions(-)
diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h
index 8fc1c0c8de0..b7b8fbe47c7 100644
--- a/arch/xtensa/include/asm/cacheflush.h
+++ b/arch/xtensa/include/asm/cacheflush.h
@@ -155,5 +155,100 @@ extern void copy_from_user_page(struct vm_area_struct*, struct page*,
 
 #endif
 
+#define XTENSA_CACHEBLK_LOG2	29
+#define XTENSA_CACHEBLK_SIZE	(1 << XTENSA_CACHEBLK_LOG2)
+#define XTENSA_CACHEBLK_MASK	(7 << XTENSA_CACHEBLK_LOG2)
+
+#if XCHAL_HAVE_CACHEATTR
+static inline u32 xtensa_get_cacheattr(void)
+{
+	u32 r;
+	asm volatile("	rsr %0, CACHEATTR" : "=a"(r));
+	return r;
+}
+
+static inline u32 xtensa_get_dtlb1(u32 addr)
+{
+	u32 r = addr & XTENSA_CACHEBLK_MASK;
+	return r | ((xtensa_get_cacheattr() >> (r >> (XTENSA_CACHEBLK_LOG2-2)))
+			& 0xF);
+}
+#else
+static inline u32 xtensa_get_dtlb1(u32 addr)
+{
+	u32 r;
+	asm volatile("	rdtlb1 %0, %1" : "=a"(r) : "a"(addr));
+	asm volatile("	dsync");
+	return r;
+}
+
+static inline u32 xtensa_get_cacheattr(void)
+{
+	u32 r = 0;
+	u32 a = 0;
+	do {
+		a -= XTENSA_CACHEBLK_SIZE;
+		r = (r << 4) | (xtensa_get_dtlb1(a) & 0xF);
+	} while (a);
+	return r;
+}
+#endif
+
+static inline int xtensa_need_flush_dma_source(u32 addr)
+{
+	return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) >= 4;
+}
+
+static inline int xtensa_need_invalidate_dma_destination(u32 addr)
+{
+	return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) != 2;
+}
+
+static inline void flush_dcache_unaligned(u32 addr, u32 size)
+{
+	u32 cnt;
+	if (size) {
+		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
+			+ XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
+		while (cnt--) {
+			asm volatile("	dhwb %0, 0" : : "a"(addr));
+			addr += XCHAL_DCACHE_LINESIZE;
+		}
+		asm volatile("	dsync");
+	}
+}
+
+static inline void invalidate_dcache_unaligned(u32 addr, u32 size)
+{
+	int cnt;
+	if (size) {
+		asm volatile("	dhwbi %0, 0 ;" : : "a"(addr));
+		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
+			- XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
+		while (cnt-- > 0) {
+			asm volatile("	dhi %0, %1" : : "a"(addr),
+					"n"(XCHAL_DCACHE_LINESIZE));
+			addr += XCHAL_DCACHE_LINESIZE;
+		}
+		asm volatile("	dhwbi %0, %1" : : "a"(addr),
+				"n"(XCHAL_DCACHE_LINESIZE));
+		asm volatile("	dsync");
+	}
+}
+
+static inline void flush_invalidate_dcache_unaligned(u32 addr, u32 size)
+{
+	u32 cnt;
+	if (size) {
+		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
+			+ XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
+		while (cnt--) {
+			asm volatile("	dhwbi %0, 0" : : "a"(addr));
+			addr += XCHAL_DCACHE_LINESIZE;
+		}
+		asm volatile("	dsync");
+	}
+}
+
 #endif /* __KERNEL__ */
 #endif /* _XTENSA_CACHEFLUSH_H */
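For readers checking the rounding in the unaligned helpers above, a small standalone sketch of the line-count arithmetic; LINESIZE and the sample addr/size are illustrative stand-ins for XCHAL_DCACHE_LINESIZE and a real buffer:

/* Standalone demonstration of the line-count arithmetic (hypothetical
 * values; a real config gets XCHAL_DCACHE_LINESIZE from <variant/core.h>).
 */
#include <stdio.h>

#define LINESIZE 32	/* stand-in for XCHAL_DCACHE_LINESIZE */

int main(void)
{
	unsigned addr = 0x100f, size = 64;	/* deliberately unaligned region */
	unsigned head = addr & (LINESIZE - 1);	/* offset into the first line */

	/* flush_dcache_unaligned() rounds up to cover every line touched */
	unsigned flush_cnt = (size + head + LINESIZE - 1) / LINESIZE;

	/* invalidate_dcache_unaligned() counts interior lines only; the
	 * two edge lines are handled separately with dhwbi so partly
	 * covered lines are written back before being discarded
	 */
	int inval_cnt = ((int)(size + head) - LINESIZE - 1) / LINESIZE;

	printf("lines touched: %u, interior lines: %d\n", flush_cnt, inval_cnt);
	/* prints: lines touched: 3, interior lines: 1 */
	return 0;
}

With addr = 0x100f and size = 64, the region spans the lines at 0x1000, 0x1020 and 0x1040: flush_dcache_unaligned() visits all three with dhwb, while invalidate_dcache_unaligned() uses dhwbi on the two edge lines (which may hold unrelated dirty data) and plain dhi on the single interior line.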