diff options
author | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400 |
commit | 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch) | |
tree | 0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/arm/mm/proc-xscale.S |
Linux-2.6.12-rc2v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/arm/mm/proc-xscale.S')
-rw-r--r-- | arch/arm/mm/proc-xscale.S | 934 |
1 files changed, 934 insertions, 0 deletions
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S new file mode 100644 index 000000000000..2d977b4eeeab --- /dev/null +++ b/arch/arm/mm/proc-xscale.S | |||
@@ -0,0 +1,934 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/mm/proc-xscale.S | ||
3 | * | ||
4 | * Author: Nicolas Pitre | ||
5 | * Created: November 2000 | ||
6 | * Copyright: (C) 2000, 2001 MontaVista Software Inc. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * MMU functions for the Intel XScale CPUs | ||
13 | * | ||
14 | * 2001 Aug 21: | ||
15 | * some contributions by Brett Gaines <brett.w.gaines@intel.com> | ||
16 | * Copyright 2001 by Intel Corp. | ||
17 | * | ||
18 | * 2001 Sep 08: | ||
19 | * Completely revisited, many important fixes | ||
20 | * Nicolas Pitre <nico@cam.org> | ||
21 | */ | ||
22 | |||
23 | #include <linux/linkage.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <asm/assembler.h> | ||
26 | #include <asm/procinfo.h> | ||
27 | #include <asm/hardware.h> | ||
28 | #include <asm/pgtable.h> | ||
29 | #include <asm/page.h> | ||
30 | #include <asm/ptrace.h> | ||
31 | #include "proc-macros.S" | ||
32 | |||
/*
 * This is the maximum size (in bytes) of an area which will be flushed
 * line-by-line.  If the area is larger than this, it is cheaper to
 * flush the whole cache instead.
 */
#define MAX_AREA_SIZE	32768

/*
 * the cache line size of the I and D cache (bytes)
 */
#define CACHELINESIZE	32

/*
 * the size of the data cache (bytes)
 */
#define CACHESIZE	32768

/*
 * Virtual address used to allocate the cache when flushed
 *
 * This must be an address range which is _never_ used.  It should
 * apparently have a mapping in the corresponding page table for
 * compatibility with future CPUs that _could_ require it.  For instance we
 * don't care.
 *
 * This must be aligned on a 2*CACHESIZE boundary.  The code selects one of
 * the 2 areas in alternance each time the clean_d_cache macro is used.
 * Without this the XScale core exhibits cache eviction problems and no one
 * knows why.
 *
 * Reminder: the vector table is located at 0xffff0000-0xffff0fff.
 */
#define CLEAN_ADDR	0xfffe0000
65 | |||
/*
 * This macro is used to wait for a CP15 write and is needed
 * when we have to ensure that the last operation to the co-pro
 * was completed before continuing with operation.
 *
 * rd is a scratch register; its value is destroyed.
 */
	.macro	cpwait, rd
	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
	mov	\rd, \rd			@ wait for completion
	sub 	pc, pc, #4			@ flush instruction pipeline
	.endm

/*
 * As cpwait, but also returns to \lr in the same instruction.
 * \rd, LSR #32 evaluates to 0, so "sub pc, \lr, \rd, LSR #32" is a
 * return that additionally stalls on the preceding cp15 read.
 */
	.macro	cpwait_ret, lr, rd
	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
	sub	pc, \lr, \rd, LSR #32		@ wait for completion and
						@ flush instruction pipeline
	.endm
82 | |||
/*
 * This macro cleans the entire dcache using line allocate.
 * The main loop has been unrolled to reduce loop overhead.
 * rd and rs are two scratch registers.
 *
 * clean_addr holds the base of the area to allocate from; it is
 * toggled between the two CACHESIZE-sized halves of the 2*CACHESIZE
 * region at CLEAN_ADDR on every invocation (see the CLEAN_ADDR
 * comment above for why the alternation is needed).
 */
	.macro  clean_d_cache, rd, rs
	ldr	\rs, =clean_addr
	ldr	\rd, [\rs]
	eor	\rd, \rd, #CACHESIZE		@ flip to the other half
	str	\rd, [\rs]
	add	\rs, \rd, #CACHESIZE		@ \rs = end of the area
1:	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	teq	\rd, \rs
	bne	1b
	.endm

	.data
@ Current alternating allocation address used by clean_d_cache.
clean_addr:	.word	CLEAN_ADDR

	.text
110 | |||
/*
 * cpu_xscale_proc_init()
 *
 * Nothing too exciting at the moment
 */
ENTRY(cpu_xscale_proc_init)
	mov	pc, lr
118 | |||
/*
 * cpu_xscale_proc_fin()
 *
 * Prepare the CPU for reset: disable interrupts, clean the caches,
 * then turn the I and D caches off in the control register.
 */
ENTRY(cpu_xscale_proc_fin)
	str	lr, [sp, #-4]!
	mov	r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r0			@ disable IRQ/FIQ, SVC mode
	bl	xscale_flush_kern_cache_all	@ clean caches
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1800			@ ...IZ...........
	bic	r0, r0, #0x0006			@ .............CA.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ldr	pc, [sp], #4
132 | |||
/*
 * cpu_xscale_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 *
 * .align 5 keeps the routine within a single cache line so the
 * instructions after the MMU is disabled are already in the pipeline.
 */
	.align	5
ENTRY(cpu_xscale_reset)
	mov	r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r1			@ reset CPSR
	mrc	p15, 0, r1, c1, c0, 0		@ ctrl register
	bic	r1, r1, #0x0086			@ ........B....CA.
	bic	r1, r1, #0x3900			@ ..VIZ..S........
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches & BTB
	bic	r1, r1, #0x0001			@ ...............M
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	@ CAUTION: MMU turned off from this point. We count on the pipeline
	@ already containing those two last instructions to survive.
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	mov	pc, r0				@ jump to the reset location
157 | |||
/*
 * cpu_xscale_do_idle()
 *
 * Cause the processor to idle
 *
 * For now we do nothing but go to idle mode for every case
 *
 * XScale supports clock switching, but using idle mode support
 * allows external hardware to react to system state changes.
 */
	.align	5

ENTRY(cpu_xscale_do_idle)
	mov	r0, #1
	mcr	p14, 0, r0, c7, c0, 0		@ Go to IDLE
	mov	pc, lr
174 | |||
/* ================================= CACHE ================================ */

/*
 * flush_user_cache_all()
 *
 * Invalidate all cache entries in a particular address
 * space.
 *
 * Identical to the kernel variant below; falls through.
 */
ENTRY(xscale_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(xscale_flush_kern_cache_all)
	mov	r2, #VM_EXEC			@ pretend VM_EXEC so I-cache is done too
	mov	ip, #0
__flush_whole_cache:
	clean_d_cache r0, r1			@ clean entire D cache (clobbers r0, r1)
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr
200 | |||
/*
 * flush_user_cache_range(start, end, vm_flags)
 *
 * Invalidate a range of cache entries in the specified
 * address space.
 *
 * - start - start address (may not be aligned)
 * - end	 - end address (exclusive, may not be aligned)
 * - vma	 - vma_area_struct describing address space
 *
 * r0 = start, r1 = end, r2 = vm_flags.  Ranges of MAX_AREA_SIZE or
 * more are punted to the whole-cache flush above.
 */
	.align	5
ENTRY(xscale_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #MAX_AREA_SIZE
	bhs	__flush_whole_cache

1:	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ Invalidate I cache line
	mcr	p15, 0, r0, c7, c10, 1		@ Clean D cache line
	mcr	p15, 0, r0, c7, c6, 1		@ Invalidate D cache line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 6		@ Invalidate BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr
229 | |||
/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start  - virtual start address
 * - end	 - virtual end address
 *
 * Note: single I-cache line invalidation isn't used here since
 * it also trashes the mini I-cache used by JTAG debuggers.
 */
ENTRY(xscale_coherent_kern_range)
	/* FALLTHROUGH */

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start  - virtual start address
 * - end	 - virtual end address
 *
 * Note: single I-cache line invalidation isn't used here since
 * it also trashes the mini I-cache used by JTAG debuggers.
 *
 * Cleans each D cache line in [start, end), then invalidates the
 * whole I cache & BTB and drains the write buffer.
 */
ENTRY(xscale_coherent_user_range)
	bic	r0, r0, #CACHELINESIZE - 1	@ align start down to a line
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr
269 | |||
/*
 * flush_kern_dcache_page(void *page)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache
 *
 * - addr - page aligned address
 *
 * Cleans+invalidates every D line in the page, then invalidates
 * the I cache & BTB and drains the write buffer.
 */
ENTRY(xscale_flush_kern_dcache_page)
	add	r1, r0, #PAGE_SZ		@ r1 = end of page
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr
289 | |||
/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start  - virtual start address
 * - end	 - virtual end address
 */
ENTRY(xscale_dma_inv_range)
	@ Read the CPU ID and compare against 0x69052... with the low bit
	@ masked; on a match this core cannot safely invalidate-only, so
	@ fall back to a full clean+invalidate (dma_flush_range).
	mrc	p15, 0, r2, c0, c0, 0		@ read ID
	eor	r2, r2, #0x69000000
	eor	r2, r2, #0x00052000
	bics	r2, r2, #1
	beq	xscale_dma_flush_range

	@ Partial lines at either end must be cleaned before invalidation
	@ so their dirty data is not lost.
	tst	r0, #CACHELINESIZE - 1
	bic	r0, r0, #CACHELINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHELINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr
319 | |||
/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start  - virtual start address
 * - end	 - virtual end address
 */
ENTRY(xscale_dma_clean_range)
	bic	r0, r0, #CACHELINESIZE - 1	@ align start down to a line
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr
336 | |||
/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start  - virtual start address
 * - end	 - virtual end address
 */
ENTRY(xscale_dma_flush_range)
	bic	r0, r0, #CACHELINESIZE - 1	@ align start down to a line
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr
354 | |||
/*
 * Table of the cache operations defined above, in the order the
 * kernel's cache function dispatch expects them.
 */
ENTRY(xscale_cache_fns)
	.long	xscale_flush_kern_cache_all
	.long	xscale_flush_user_cache_all
	.long	xscale_flush_user_cache_range
	.long	xscale_coherent_kern_range
	.long	xscale_coherent_user_range
	.long	xscale_flush_kern_dcache_page
	.long	xscale_dma_inv_range
	.long	xscale_dma_clean_range
	.long	xscale_dma_flush_range
365 | |||
/*
 * cpu_xscale_dcache_clean_area(addr, size)
 *
 * Clean (write back) the D cache lines covering [r0, r0 + r1).
 * r0 = start address, r1 = size in bytes.
 */
ENTRY(cpu_xscale_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	subs	r1, r1, #CACHELINESIZE		@ r1 = bytes remaining
	bhi	1b
	mov	pc, lr
372 | |||
/* ================================ CACHE LOCKING============================
 *
 * The XScale MicroArchitecture implements support for locking entries into
 * the data and instruction cache.  The following functions implement the core
 * low level instructions needed to accomplish the locking.  The developer's
 * manual states that the code that performs the locking must be in non-cached
 * memory.  To accomplish this, the code in xscale-cache-lock.c copies the
 * following functions from the cache into a non-cached memory region that
 * is allocated through consistent_alloc().
 *
 */
	.align	5
/*
 * xscale_icache_lock
 *
 * r0: starting address to lock
 * r1: end address to lock (inclusive of the line containing it)
 */
ENTRY(xscale_icache_lock)

iLockLoop:
	bic	r0, r0, #CACHELINESIZE - 1	@ align down to a cache line
	mcr	p15, 0, r0, c9, c1, 0		@ lock into cache
	cmp	r0, r1				@ are we done?
	add	r0, r0, #CACHELINESIZE		@ advance to next cache line
	bls	iLockLoop
	mov	pc, lr
400 | |||
/*
 * xscale_icache_unlock
 *
 * Unlock all locked I cache entries.
 */
ENTRY(xscale_icache_unlock)
	mcr	p15, 0, r0, c9, c1, 1		@ Unlock icache
	mov	pc, lr
407 | |||
/*
 * xscale_dcache_lock
 *
 * r0: starting address to lock
 * r1: end address to lock (inclusive of the line containing it)
 *
 * Puts the D cache into lock mode, then, with interrupts masked around
 * each line, cleans+invalidates the line and reloads it via an ldr so
 * it is fetched (and locked) into the cache.
 */
ENTRY(xscale_dcache_lock)
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	r2, #1
	mcr	p15, 0, r2, c9, c2, 0		@ Put dcache in lock mode
	cpwait	ip				@ Wait for completion

	mrs	r2, cpsr			@ r2 = saved CPSR
	orr	r3, r2, #PSR_F_BIT | PSR_I_BIT	@ r3 = CPSR with IRQ/FIQ masked
dLockLoop:
	msr	cpsr_c, r3			@ mask interrupts for this line
	mcr	p15, 0, r0, c7, c10, 1		@ Write back line if it is dirty
	mcr	p15, 0, r0, c7, c6, 1		@ Flush/invalidate line
	msr	cpsr_c, r2			@ restore interrupt state
	ldr	ip, [r0], #CACHELINESIZE	@ Preload 32 bytes into cache from
						@ location [r0]. Post-increment
						@ r0 to next cache line
	cmp	r0, r1				@ Are we done?
	bls	dLockLoop

	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	r2, #0
	mcr	p15, 0, r2, c9, c2, 0		@ Get out of lock mode
	cpwait_ret lr, ip
437 | |||
/*
 * xscale_dcache_unlock
 *
 * Unlock all locked D cache entries.
 */
ENTRY(xscale_dcache_unlock)
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mcr	p15, 0, ip, c9, c2, 1		@ Unlock cache
	mov	pc, lr
445 | |||
/*
 * Needed to determine the length of the code that needs to be copied.
 * (End marker for the cache-locking routines above.)
 */
	.align	5
ENTRY(xscale_cache_dummy)
	mov	pc, lr
452 | |||
/* ================================ TLB LOCKING==============================
 *
 * The XScale MicroArchitecture implements support for locking entries into
 * the Instruction and Data TLBs.  The following functions provide the
 * low level support for supporting these under Linux.  xscale-lock.c
 * implements some higher level management code.  Most of the following
 * is taken straight out of the Developer's Manual.
 */

/*
 * Lock I-TLB entry
 *
 * r0: Virtual address to translate and lock
 *
 * Interrupts are masked while the entry is invalidated and re-locked.
 */
	.align	5
ENTRY(xscale_itlb_lock)
	mrs	r2, cpsr
	orr	r3, r2, #PSR_F_BIT | PSR_I_BIT
	msr	cpsr_c, r3			@ Disable interrupts
	mcr	p15, 0, r0, c8, c5, 1		@ Invalidate I-TLB entry
	mcr	p15, 0, r0, c10, c4, 0		@ Translate and lock
	msr	cpsr_c, r2			@ Restore interrupts
	cpwait_ret lr, ip
476 | |||
/*
 * Lock D-TLB entry
 *
 * r0: Virtual address to translate and lock
 *
 * Interrupts are masked while the entry is invalidated and re-locked.
 */
	.align	5
ENTRY(xscale_dtlb_lock)
	mrs	r2, cpsr
	orr	r3, r2, #PSR_F_BIT | PSR_I_BIT
	msr	cpsr_c, r3			@ Disable interrupts
	mcr	p15, 0, r0, c8, c6, 1		@ Invalidate D-TLB entry
	mcr	p15, 0, r0, c10, c8, 0		@ Translate and lock
	msr	cpsr_c, r2			@ Restore interrupts
	cpwait_ret lr, ip
491 | |||
/*
 * Unlock all I-TLB entries
 */
	.align	5
ENTRY(xscale_itlb_unlock)
	mcr	p15, 0, ip, c10, c4, 1		@ Unlock I-TLB
	mcr	p15, 0, ip, c8, c5, 0		@ Invalidate I-TLB
	cpwait_ret lr, ip
500 | |||
/*
 * Unlock all D-TLB entries
 */
ENTRY(xscale_dtlb_unlock)
	mcr	p15, 0, ip, c10, c8, 1		@ Unlock D-TLB
	mcr	p15, 0, ip, c8, c6, 0		@ Invalidate D-TLB
	cpwait_ret lr, ip
508 | |||
/* =============================== PageTable ============================== */

@ Set to 1 to allow the X (TEX) bit for writeable cacheable regions when
@ write allocate is enabled; see the X-bit comment in cpu_xscale_set_pte.
#define PTE_CACHE_WRITE_ALLOCATE 0

/*
 * cpu_xscale_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 *
 * The entire D cache is cleaned and I cache/BTB/TLBs invalidated
 * before the new page table base is installed.
 */
	.align	5
ENTRY(cpu_xscale_switch_mm)
	clean_d_cache r1, r2			@ clean whole D cache (clobbers r1, r2)
	mcr	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	cpwait_ret lr, ip
528 | |||
/*
 * cpu_xscale_set_pte(ptep, pte)
 *
 * Set a PTE and flush it out
 *
 * r0 = ptep (Linux-level PTE slot; hardware PTE lives 2048 bytes below)
 * r1 = pte value
 *
 * Errata 40: must set memory to write-through for user read-only pages.
 */
	.align	5
ENTRY(cpu_xscale_set_pte)
	str	r1, [r0], #-2048		@ linux version

	@ Build the hardware PTE in r2 from the Linux PTE in r1.
	bic	r2, r1, #0xff0
	orr	r2, r2, #PTE_TYPE_EXT		@ extended page

	@ r3 = r1 with PRESENT/YOUNG/WRITE/DIRTY bits inverted, so a
	@ "tst" on r3 tests for those bits being *clear* in the PTE.
	eor	r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY

	tst	r3, #L_PTE_USER			@ User?
	orrne	r2, r2, #PTE_EXT_AP_URO_SRW	@ yes -> user r/o, system r/w

	tst	r3, #L_PTE_WRITE | L_PTE_DIRTY	@ Write and Dirty?
	orreq	r2, r2, #PTE_EXT_AP_UNO_SRW	@ yes -> user n/a, system r/w
						@ combined with user -> user r/w

	@
	@ Handle the X bit.  We want to set this bit for the minicache
	@ (U = E = B = W = 0, C = 1) or when write allocate is enabled,
	@ and we have a writeable, cacheable region.  If we ignore the
	@ U and E bits, we can allow user space to use the minicache as
	@ well.
	@
	@  X = (C & ~W & ~B) | (C & W & B & write_allocate)
	@
	eor	ip, r1, #L_PTE_CACHEABLE
	tst	ip, #L_PTE_CACHEABLE | L_PTE_WRITE | L_PTE_BUFFERABLE
#if PTE_CACHE_WRITE_ALLOCATE
	eorne	ip, r1, #L_PTE_CACHEABLE | L_PTE_WRITE | L_PTE_BUFFERABLE
	tstne	ip, #L_PTE_CACHEABLE | L_PTE_WRITE | L_PTE_BUFFERABLE
#endif
	orreq	r2, r2, #PTE_EXT_TEX(1)

	@
	@ Erratum 40: The B bit must be cleared for a user read-only
	@ cacheable page.
	@
	@  B = B & ~(U & C & ~W)
	@
	and	ip, r1, #L_PTE_USER | L_PTE_WRITE | L_PTE_CACHEABLE
	teq	ip, #L_PTE_USER | L_PTE_CACHEABLE
	biceq	r2, r2, #PTE_BUFFERABLE

	tst	r3, #L_PTE_PRESENT | L_PTE_YOUNG	@ Present and Young?
	movne	r2, #0					@ no -> fault

	str	r2, [r0]			@ hardware version
	mov	ip, #0
	mcr	p15, 0, r0, c7, c10, 1		@ Clean D cache line
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr
587 | |||

	.ltorg

	.align

	__INIT

/*
 * __xscale_setup
 *
 * Early CPU initialisation, branched to from the .proc.info entries.
 * Invalidates caches/BTB/TLBs, configures coprocessor access, and
 * returns in r0 the control-register value to be used (computed from
 * the current value masked/ORed with xscale_cr1_clear/xscale_cr1_set).
 * Clobbers r5.
 */
	.type	__xscale_setup, #function
__xscale_setup:
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I, D caches & BTB
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I, D TLBs
#ifdef CONFIG_IWMMXT
	mov	r0, #0				@ initially disallow access to CP0/CP1
#else
	mov	r0, #1				@ Allow access to CP0
#endif
	orr	r0, r0, #1 << 6			@ cp6 for IOP3xx and Bulverde
	orr	r0, r0, #1 << 13		@ Its undefined whether this
	mcr	p15, 0, r0, c15, c1, 0		@ affects USR or SVC modes
	mrc	p15, 0, r0, c1, c0, 0		@ get control register
	ldr	r5, xscale_cr1_clear
	bic	r0, r0, r5
	ldr	r5, xscale_cr1_set
	orr	r0, r0, r5
	mov	pc, lr
	.size	__xscale_setup, . - __xscale_setup
615 | |||
/*
 * Control-register bit masks applied by __xscale_setup.
 *
 *  R
 * .RVI ZFRS BLDP WCAM
 * ..11 1.01 .... .101
 *
 */
	.type	xscale_cr1_clear, #object
	.type	xscale_cr1_set, #object
xscale_cr1_clear:
	.word	0x3b07
xscale_cr1_set:
	.word	0x3905
628 | |||
	__INITDATA

/*
 * Purpose : Function pointers used to access above functions - all calls
 *	     come through these
 */

	.type	xscale_processor_functions, #object
ENTRY(xscale_processor_functions)
	.word	v5t_early_abort
	.word	cpu_xscale_proc_init
	.word	cpu_xscale_proc_fin
	.word	cpu_xscale_reset
	.word	cpu_xscale_do_idle
	.word	cpu_xscale_dcache_clean_area
	.word	cpu_xscale_switch_mm
	.word	cpu_xscale_set_pte
	.size	xscale_processor_functions, . - xscale_processor_functions
647 | |||
	.section ".rodata"

@ Human-readable CPU architecture, ELF-arch and per-variant name strings,
@ referenced from the .proc.info entries below.

	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv5te"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v5"
	.size	cpu_elf_name, . - cpu_elf_name

	.type	cpu_80200_name, #object
cpu_80200_name:
	.asciz	"XScale-80200"
	.size	cpu_80200_name, . - cpu_80200_name

	.type	cpu_8032x_name, #object
cpu_8032x_name:
	.asciz	"XScale-IOP8032x Family"
	.size	cpu_8032x_name, . - cpu_8032x_name

	.type	cpu_8033x_name, #object
cpu_8033x_name:
	.asciz	"XScale-IOP8033x Family"
	.size	cpu_8033x_name, . - cpu_8033x_name

	.type	cpu_pxa250_name, #object
cpu_pxa250_name:
	.asciz	"XScale-PXA250"
	.size	cpu_pxa250_name, . - cpu_pxa250_name

	.type	cpu_pxa210_name, #object
cpu_pxa210_name:
	.asciz	"XScale-PXA210"
	.size	cpu_pxa210_name, . - cpu_pxa210_name

	.type	cpu_ixp42x_name, #object
cpu_ixp42x_name:
	.asciz	"XScale-IXP42x Family"
	.size	cpu_ixp42x_name, . - cpu_ixp42x_name

	.type	cpu_ixp46x_name, #object
cpu_ixp46x_name:
	.asciz	"XScale-IXP46x Family"
	.size	cpu_ixp46x_name, . - cpu_ixp46x_name

	.type	cpu_ixp2400_name, #object
cpu_ixp2400_name:
	.asciz	"XScale-IXP2400"
	.size	cpu_ixp2400_name, . - cpu_ixp2400_name

	.type	cpu_ixp2800_name, #object
cpu_ixp2800_name:
	.asciz	"XScale-IXP2800"
	.size	cpu_ixp2800_name, . - cpu_ixp2800_name

	.type	cpu_pxa255_name, #object
cpu_pxa255_name:
	.asciz	"XScale-PXA255"
	.size	cpu_pxa255_name, . - cpu_pxa255_name

	.type	cpu_pxa270_name, #object
cpu_pxa270_name:
	.asciz	"XScale-PXA270"
	.size	cpu_pxa270_name, . - cpu_pxa270_name
714 | |||
	.align

	.section ".proc.info", #alloc, #execinstr

@ Per-CPU-variant processor info records.  Each entry contains, in order:
@   - CPU ID value and mask used to match against the main ID register
@   - section-mapping MMU flags (PMD_*) for the initial memory mapping
@   - branch to the early setup routine (__xscale_setup)
@   - pointers to the arch name, ELF name, hwcap flags, human-readable
@     CPU name, processor/TLB/user/cache function tables

	.type	__80200_proc_info,#object
__80200_proc_info:
	.long	0x69052000
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_80200_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__80200_proc_info, . - __80200_proc_info

	.type	__8032x_proc_info,#object
__8032x_proc_info:
	.long	0x69052420
	.long	0xfffff5e0	@ mask should accommodate IOP80219 also
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_8032x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__8032x_proc_info, . - __8032x_proc_info

	.type	__8033x_proc_info,#object
__8033x_proc_info:
	.long	0x69054010
	.long	0xffffff30
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_8033x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__8033x_proc_info, . - __8033x_proc_info

	.type	__pxa250_proc_info,#object
__pxa250_proc_info:
	.long	0x69052100
	.long	0xfffff7f0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_pxa250_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__pxa250_proc_info, . - __pxa250_proc_info

	.type	__pxa210_proc_info,#object
__pxa210_proc_info:
	.long	0x69052120
	.long	0xfffff3f0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_pxa210_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__pxa210_proc_info, . - __pxa210_proc_info

	.type	__ixp2400_proc_info, #object
__ixp2400_proc_info:
	.long	0x69054190
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp2400_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp2400_proc_info, . - __ixp2400_proc_info

	.type	__ixp2800_proc_info, #object
__ixp2800_proc_info:
	.long	0x690541a0
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp2800_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp2800_proc_info, . - __ixp2800_proc_info

	.type	__ixp42x_proc_info, #object
__ixp42x_proc_info:
	.long	0x690541c0
	.long	0xffffffc0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp42x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp42x_proc_info, . - __ixp42x_proc_info

	.type	__ixp46x_proc_info, #object
__ixp46x_proc_info:
	.long	0x69054200
	.long	0xffffff00
	.long	0x00000c0e	@ section flags as a raw value — presumably
				@ equivalent to the PMD_* combination used by
				@ the other entries; verify against PMD_* bits
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp46x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp46x_proc_info, . - __ixp46x_proc_info

	.type	__pxa255_proc_info,#object
__pxa255_proc_info:
	.long	0x69052d00
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_pxa255_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__pxa255_proc_info, . - __pxa255_proc_info

	.type	__pxa270_proc_info,#object
__pxa270_proc_info:
	.long	0x69054110
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_pxa270_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__pxa270_proc_info, . - __pxa270_proc_info