author		Paulius Zaleckas <paulius.zaleckas@teltonika.lt>	2009-03-25 07:10:01 -0400
committer	Paulius Zaleckas <paulius.zaleckas@teltonika.lt>	2009-03-25 07:10:01 -0400
commit		28853ac8fe5221de74a14f1182d7b2b383dfd85c (patch)
tree		dcfd9b20028e5a3a9504e26d2c9060f4746fc83a /arch/arm/mm/cache-fa.S
parent		bba7d0b9ba0f04d25145de8170a17a3a07bbfdde (diff)
ARM: Add support for FA526 v2
Adds support for Faraday FA526 core. This core is used at least by:
  Cortina Systems Gemini and Centroid family
  Cavium Networks ECONA family
  Grain Media GM8120
  Pixelplus ImageARM
  Prolific PL-1029
  Faraday IP evaluation boards

v2:
- move TLB_BTB to separate patch
- update copyrights

Signed-off-by: Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
Diffstat (limited to 'arch/arm/mm/cache-fa.S')
-rw-r--r--	arch/arm/mm/cache-fa.S	220
1 file changed, 220 insertions(+), 0 deletions(-)
diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S
new file mode 100644
index 000000000000..b63a8f7b95cf
--- /dev/null
+++ b/arch/arm/mm/cache-fa.S
@@ -0,0 +1,220 @@
/*
 * linux/arch/arm/mm/cache-fa.S
 *
 * Copyright (C) 2005 Faraday Corp.
 * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
 *
 * Based on cache-v4wb.S:
 * Copyright (C) 1997-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Processors: FA520 FA526 FA626
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/memory.h>
#include <asm/page.h>

#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	16

/*
 * The total size of the data cache.
 */
#ifdef CONFIG_ARCH_GEMINI
#define CACHE_DSIZE	8192
#else
#define CACHE_DSIZE	16384
#endif

/* FIXME: put optimal value here. Current one is just estimation */
#define CACHE_DLIMIT	(CACHE_DSIZE * 2)

/*
 * flush_user_cache_all()
 *
 * Clean and invalidate all cache entries in a particular address
 * space.
 */
ENTRY(fa_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(fa_flush_kern_cache_all)
	mov	ip, #0
	mov	r2, #VM_EXEC
__flush_whole_cache:
	mcr	p15, 0, ip, c7, c14, 0		@ clean/invalidate D cache
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c5, 6		@ invalidate BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ drain write buffer
	mcrne	p15, 0, ip, c7, c5, 4		@ prefetch flush
	mov	pc, lr
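
For readers less used to the cp15 mnemonics, here is a minimal C sketch of the same whole-cache sequence using GCC inline assembly. This is not part of the patch; the function name and the standalone C form are illustrative only.

static inline void fa_flush_whole_cache_sketch(void)
{
	unsigned long zero = 0;

	/* clean + invalidate the entire D cache */
	asm volatile("mcr p15, 0, %0, c7, c14, 0" : : "r" (zero) : "memory");
	/* invalidate the I cache */
	asm volatile("mcr p15, 0, %0, c7, c5, 0" : : "r" (zero) : "memory");
	/* invalidate the branch target buffer */
	asm volatile("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "memory");
	/* drain the write buffer */
	asm volatile("mcr p15, 0, %0, c7, c10, 4" : : "r" (zero) : "memory");
	/* prefetch flush */
	asm volatile("mcr p15, 0, %0, c7, c5, 4" : : "r" (zero) : "memory");
}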

/*
 * flush_user_cache_range(start, end, flags)
 *
 * Invalidate a range of cache entries in the specified
 * address space.
 *
 * - start - start address (inclusive, page aligned)
 * - end   - end address (exclusive, page aligned)
 * - flags - vm_area_struct flags describing address space
 */
ENTRY(fa_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT		@ total size >= limit?
	bhs	__flush_whole_cache		@ flush whole D cache

1:	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I line
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 6		@ invalidate BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ data write barrier
	mcrne	p15, 0, ip, c7, c5, 4		@ prefetch flush
	mov	pc, lr
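
The CACHE_DLIMIT test above is a heuristic: beyond that size it is assumed cheaper to clean and invalidate the whole D cache than to walk the range line by line. A minimal C sketch of the control flow follows, assuming the defines above, VM_EXEC from <linux/mm.h>, and the whole-cache sketch shown earlier; the names are illustrative and this is not part of the patch.

#include <linux/mm.h>		/* VM_EXEC */

static void fa_flush_user_range_sketch(unsigned long start, unsigned long end,
				       unsigned long flags)
{
	unsigned long addr, zero = 0;

	if (end - start >= CACHE_DLIMIT) {
		/* large range: take the whole-cache path, see the sketch above */
		fa_flush_whole_cache_sketch();
		return;
	}
	for (addr = start; addr < end; addr += CACHE_DLINESIZE) {
		if (flags & VM_EXEC)	/* executable mapping: keep the I side coherent */
			asm volatile("mcr p15, 0, %0, c7, c5, 1" : : "r" (addr));
		/* clean and invalidate the D line holding addr */
		asm volatile("mcr p15, 0, %0, c7, c14, 1" : : "r" (addr) : "memory");
	}
	if (flags & VM_EXEC) {
		asm volatile("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero));	/* BTB */
		asm volatile("mcr p15, 0, %0, c7, c10, 4" : : "r" (zero));	/* write barrier */
		asm volatile("mcr p15, 0, %0, c7, c5, 4" : : "r" (zero));	/* prefetch flush */
	}
}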

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start. If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(fa_coherent_kern_range)
	/* fall through */

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start. If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(fa_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 6		@ invalidate BTB
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mcr	p15, 0, r0, c7, c5, 4		@ prefetch flush
	mov	pc, lr
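
These two entry points are what the generic ARM cache glue reaches when instructions have just been written to memory; in kernels of this era flush_icache_range() is routed to the coherent_kern_range method, so on an FA526 code such as the following ends up here. A hedged usage sketch; the buffer, length and function name are made up for illustration.

#include <linux/types.h>
#include <asm/cacheflush.h>

static void publish_code_sketch(void *buf, unsigned long len)
{
	/* ... freshly generated instructions have been written into buf ... */
	flush_icache_range((unsigned long)buf, (unsigned long)buf + len);
	/* D lines cleaned, I lines invalidated: buf may now be executed */
}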

/*
 * flush_kern_dcache_page(kaddr)
 *
 * Ensure that the data held in the page kaddr is written back
 * to the page in question.
 *
 * - kaddr - kernel address (guaranteed to be page aligned)
 */
ENTRY(fa_flush_kern_dcache_page)
	add	r1, r0, #PAGE_SZ
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mov	pc, lr

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries. If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(fa_dma_inv_range)
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c14, 1		@ clean & invalidate D entry
	tst	r1, #CACHE_DLINESIZE - 1
	bic	r1, r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c14, 1		@ clean & invalidate D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mov	pc, lr
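
The subtle part of dma_inv_range is the boundary handling: a cache line the range only partly covers may also hold unrelated live data, so it is cleaned (written back) and invalidated rather than simply discarded, while fully covered lines are just invalidated. A C sketch mirroring the assembly above; the function name is illustrative and this is not part of the patch.

static void fa_dma_inv_range_sketch(unsigned long start, unsigned long end)
{
	unsigned long addr, zero = 0;

	/* partial line at the start: clean + invalidate it first */
	if (start & (CACHE_DLINESIZE - 1))
		asm volatile("mcr p15, 0, %0, c7, c14, 1"
			     : : "r" (start & ~(CACHE_DLINESIZE - 1)) : "memory");
	/* partial line at the end: clean + invalidate it as well */
	if (end & (CACHE_DLINESIZE - 1))
		asm volatile("mcr p15, 0, %0, c7, c14, 1"
			     : : "r" (end & ~(CACHE_DLINESIZE - 1)) : "memory");

	start &= ~(CACHE_DLINESIZE - 1);
	end &= ~(CACHE_DLINESIZE - 1);
	for (addr = start; addr < end; addr += CACHE_DLINESIZE)
		/* fully covered lines: invalidate (discard) only */
		asm volatile("mcr p15, 0, %0, c7, c6, 1" : : "r" (addr) : "memory");

	/* drain the write buffer */
	asm volatile("mcr p15, 0, %0, c7, c10, 4" : : "r" (zero) : "memory");
}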

/*
 * dma_clean_range(start, end)
 *
 * Clean (write back) the specified virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(fa_dma_clean_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mov	pc, lr

/*
 * dma_flush_range(start,end)
 * - start - virtual start address of region
 * - end   - virtual end address of region
 */
ENTRY(fa_dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mov	pc, lr

	__INITDATA

	.type	fa_cache_fns, #object
ENTRY(fa_cache_fns)
	.long	fa_flush_kern_cache_all
	.long	fa_flush_user_cache_all
	.long	fa_flush_user_cache_range
	.long	fa_coherent_kern_range
	.long	fa_coherent_user_range
	.long	fa_flush_kern_dcache_page
	.long	fa_dma_inv_range
	.long	fa_dma_clean_range
	.long	fa_dma_flush_range
	.size	fa_cache_fns, . - fa_cache_fns
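
The .long entries must appear in the same order as the methods of struct cpu_cache_fns, through which the ARM cache glue dispatches to the core-specific routines. As a reminder, roughly the layout around this kernel version; the exact member names and signatures are an assumption here, and the authoritative definition lives in arch/arm/include/asm/cacheflush.h.

/* Approximate layout only; verify against asm/cacheflush.h in this tree. */
struct cpu_cache_fns {
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	void (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_page)(void *);

	void (*dma_inv_range)(const void *, const void *);
	void (*dma_clean_range)(const void *, const void *);
	void (*dma_flush_range)(const void *, const void *);
};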