author		Lennert Buytenhek <buytenh@wantstofly.org>	2007-02-04 18:55:27 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2007-02-08 09:48:44 -0500
commit		850b42933e70c19c7765dd7fad15cb7ad3955b65 (patch)
tree		c57d1f3e65a26ad9af2f75540aea49be256322ac /arch
parent		ae0a846e411dc0b568e8ccda584896310ee5f369 (diff)
[ARM] 4123/1: xsc3: general cleanup
This patch cleans up proc-xsc3:
- Correct a number of typos.
- Fix up indentation in a number of places.
- Change references to the various caches to be more clear about whether
  we're talking about the L1 D, the L1 I or the unified L2 cache.
- Rename "drain write buffer" to "data write barrier", the official name
  used in the Manzano manual.
- Change the xsc3 cpu name from "XScale-Core3" to "XScale-V3 based
  processor".

Also, since a previously merged patch implements proper support for using
a MAC or iWMMXt coprocessor on xsc3 platforms, we no longer need to enable
access to CP0 on boot.

Signed-off-by: Lennert Buytenhek <buytenh@wantstofly.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
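The renamed operations are the two CP15 maintenance instructions that recur
throughout the diff below. As a minimal sketch (the register used is
arbitrary, as in the patch; only the c7, c10, 4 and c7, c5, 4 encodings are
taken from the patch itself):

	mov	r0, #0				@ operand value is ignored
	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier (formerly "drain write buffer")
	mcr	p15, 0, r0, c7, c5, 4		@ prefetch flush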
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/mm/proc-xsc3.S	| 151
1 file changed, 75 insertions(+), 76 deletions(-)
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index 94a58455f346..d95921a2ab99 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -5,23 +5,23 @@
  * Current Maintainer: Lennert Buytenhek <buytenh@wantstofly.org>
  *
  * Copyright 2004 (C) Intel Corp.
- * Copyright 2005 (c) MontaVista Software, Inc.
+ * Copyright 2005 (C) MontaVista Software, Inc.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  *
- * MMU functions for the Intel XScale3 Core (XSC3). The XSC3 core is an
- * extension to Intel's original XScale core that adds the following
+ * MMU functions for the Intel XScale3 Core (XSC3). The XSC3 core is
+ * an extension to Intel's original XScale core that adds the following
  * features:
  *
  * - ARMv6 Supersections
  * - Low Locality Reference pages (replaces mini-cache)
  * - 36-bit addressing
  * - L2 cache
- * - Cache-coherency if chipset supports it
+ * - Cache coherency if chipset supports it
  *
- * Based on orignal XScale code by Nicolas Pitre
+ * Based on original XScale code by Nicolas Pitre.
  */
 
 #include <linux/linkage.h>
@@ -42,12 +42,12 @@
 #define MAX_AREA_SIZE	32768
 
 /*
- * The cache line size of the I and D cache.
+ * The cache line size of the L1 I, L1 D and unified L2 cache.
  */
 #define CACHELINESIZE	32
 
 /*
- * The size of the data cache.
+ * The size of the L1 D cache.
  */
 #define CACHESIZE	32768
@@ -57,9 +57,9 @@
 #define L2_CACHE_ENABLE	1
 
 /*
- * This macro is used to wait for a CP15 write and is needed
- * when we have to ensure that the last operation to the co-pro
- * was completed before continuing with operation.
+ * This macro is used to wait for a CP15 write and is needed when we
+ * have to ensure that the last operation to the coprocessor was
+ * completed before continuing with operation.
  */
 	.macro	cpwait_ret, lr, rd
 	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
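The wait here relies on the standard XScale "CPWAIT" idiom: perform an
arbitrary CP15 read, then make the next instruction's result depend on it,
so execution stalls until earlier CP15 writes have taken effect. A sketch of
the usual return variant (the macro body past the mrc falls outside this
hunk; the shifted return shown is the canonical idiom, not quoted from it):

	mrc	p15, 0, r0, c2, c0, 0		@ arbitrary read of cp15
	sub	pc, lr, r0, lsr #32		@ r0 >> 32 is 0, but the use of r0
						@ stalls until the mrc completes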
@@ -68,13 +68,13 @@
 	.endm
 
 /*
- * This macro cleans & invalidates the entire xsc3 dcache by set & way.
+ * This macro cleans and invalidates the entire L1 D cache.
  */
 
 	.macro	clean_d_cache rd, rs
 	mov	\rd, #0x1f00
	orr	\rd, \rd, #0x00e0
-1:	mcr	p15, 0, \rd, c7, c14, 2		@ clean/inv set/way
+1:	mcr	p15, 0, \rd, c7, c14, 2		@ clean/invalidate L1 D line
 	adds	\rd, \rd, #0x40000000
 	bcc	1b
 	subs	\rd, \rd, #0x20
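The constants in clean_d_cache encode a set/way walk of the whole L1 D
cache: 0x1f00 | 0xe0 = 0x1fe0 puts set index 255 in bits [12:5], the
adds #0x40000000 steps the way index in bits [31:30] (the carry out marks
the fourth way), and subs #0x20 moves down one set. With 256 sets, 4 ways
and 32-byte lines this covers exactly CACHESIZE = 32 KB. A sketch of the
complete loop under that reading (the closing branch is inferred; the hunk
above ends at the subs):

	mov	r0, #0x1f00
	orr	r0, r0, #0x00e0			@ set 255, way 0
1:	mcr	p15, 0, r0, c7, c14, 2		@ clean/invalidate L1 D line by set/way
	adds	r0, r0, #0x40000000		@ next way; carry set after way 3
	bcc	1b
	subs	r0, r0, #0x20			@ next set (bits [12:5])
	bcs	1b				@ stop once set 0 has been done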
@@ -119,15 +119,15 @@ ENTRY(cpu_xsc3_reset)
 	mov	r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
 	msr	cpsr_c, r1			@ reset CPSR
 	mrc	p15, 0, r1, c1, c0, 0		@ ctrl register
-	bic	r1, r1, #0x0086			@ ........B....CA.
 	bic	r1, r1, #0x3900			@ ..VIZ..S........
+	bic	r1, r1, #0x0086			@ ........B....CA.
 	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
-	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches & BTB
+	mcr	p15, 0, ip, c7, c7, 0		@ invalidate L1 caches and BTB
 	bic	r1, r1, #0x0001			@ ...............M
 	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
 	@ CAUTION: MMU turned off from this point. We count on the pipeline
 	@ already containing those two last instructions to survive.
-	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
+	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I and D TLBs
 	mov	pc, r0
 
 /*
@@ -139,14 +139,12 @@ ENTRY(cpu_xsc3_reset)
  *
  * XScale supports clock switching, but using idle mode support
  * allows external hardware to react to system state changes.
-
- MMG: Come back to this one.
  */
 	.align	5
 
 ENTRY(cpu_xsc3_do_idle)
 	mov	r0, #1
-	mcr	p14, 0, r0, c7, c0, 0		@ Go to IDLE
+	mcr	p14, 0, r0, c7, c0, 0		@ go to idle
 	mov	pc, lr
 
 /* ================================= CACHE ================================ */
@@ -171,9 +169,9 @@ ENTRY(xsc3_flush_kern_cache_all)
 __flush_whole_cache:
 	clean_d_cache r0, r1
 	tst	r2, #VM_EXEC
-	mcrne	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
-	mcrne	p15, 0, ip, c7, c10, 4		@ Drain Write Buffer
-	mcrne	p15, 0, ip, c7, c5, 4		@ Prefetch Flush
+	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate L1 I cache and BTB
+	mcrne	p15, 0, ip, c7, c10, 4		@ data write barrier
+	mcrne	p15, 0, ip, c7, c5, 4		@ prefetch flush
 	mov	pc, lr
 
 /*
@@ -194,21 +192,21 @@ ENTRY(xsc3_flush_user_cache_range)
 	bhs	__flush_whole_cache
 
 1:	tst	r2, #VM_EXEC
-	mcrne	p15, 0, r0, c7, c5, 1		@ Invalidate I cache line
-	mcr	p15, 0, r0, c7, c14, 1		@ Clean/invalidate D cache line
+	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate L1 I line
+	mcr	p15, 0, r0, c7, c14, 1		@ clean/invalidate L1 D line
 	add	r0, r0, #CACHELINESIZE
 	cmp	r0, r1
 	blo	1b
 	tst	r2, #VM_EXEC
-	mcrne	p15, 0, ip, c7, c5, 6		@ Invalidate BTB
-	mcrne	p15, 0, ip, c7, c10, 4		@ Drain Write Buffer
-	mcrne	p15, 0, ip, c7, c5, 4		@ Prefetch Flush
+	mcrne	p15, 0, ip, c7, c5, 6		@ invalidate BTB
+	mcrne	p15, 0, ip, c7, c10, 4		@ data write barrier
+	mcrne	p15, 0, ip, c7, c5, 4		@ prefetch flush
 	mov	pc, lr
 
 /*
  * coherent_kern_range(start, end)
  *
- * Ensure coherency between the Icache and the Dcache in the
+ * Ensure coherency between the I cache and the D cache in the
  * region described by start.  If you have non-snooping
  * Harvard caches, you need to implement this function.
  *
@@ -222,34 +220,34 @@ ENTRY(xsc3_coherent_kern_range)
 /* FALLTHROUGH */
 ENTRY(xsc3_coherent_user_range)
 	bic	r0, r0, #CACHELINESIZE - 1
-1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
+1:	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D line
 	add	r0, r0, #CACHELINESIZE
 	cmp	r0, r1
 	blo	1b
 	mov	r0, #0
-	mcr	p15, 0, r0, c7, c5, 0		@ Invalidate I cache & BTB
-	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write Buffer
-	mcr	p15, 0, r0, c7, c5, 4		@ Prefetch Flush
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate L1 I cache and BTB
+	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
+	mcr	p15, 0, r0, c7, c5, 4		@ prefetch flush
 	mov	pc, lr
 
 /*
  * flush_kern_dcache_page(void *page)
  *
  * Ensure no D cache aliasing occurs, either with itself or
- * the I cache
+ * the I cache.
  *
  * - addr  - page aligned address
  */
 ENTRY(xsc3_flush_kern_dcache_page)
 	add	r1, r0, #PAGE_SZ
-1:	mcr	p15, 0, r0, c7, c14, 1		@ Clean/Invalidate D Cache line
+1:	mcr	p15, 0, r0, c7, c14, 1		@ clean/invalidate L1 D line
 	add	r0, r0, #CACHELINESIZE
 	cmp	r0, r1
 	blo	1b
 	mov	r0, #0
-	mcr	p15, 0, r0, c7, c5, 0		@ Invalidate I cache & BTB
-	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write Buffer
-	mcr	p15, 0, r0, c7, c5, 4		@ Prefetch Flush
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate L1 I cache and BTB
+	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
+	mcr	p15, 0, r0, c7, c5, 4		@ prefetch flush
 	mov	pc, lr
 
 /*
@@ -266,17 +264,17 @@ ENTRY(xsc3_flush_kern_dcache_page)
 ENTRY(xsc3_dma_inv_range)
 	tst	r0, #CACHELINESIZE - 1
 	bic	r0, r0, #CACHELINESIZE - 1
-	mcrne	p15, 0, r0, c7, c10, 1		@ clean L1 D entry
-	mcrne	p15, 1, r0, c7, c11, 1		@ clean L2 D entry
+	mcrne	p15, 0, r0, c7, c10, 1		@ clean L1 D line
+	mcrne	p15, 1, r0, c7, c11, 1		@ clean L2 line
 	tst	r1, #CACHELINESIZE - 1
-	mcrne	p15, 0, r1, c7, c10, 1		@ clean L1 D entry
-	mcrne	p15, 1, r1, c7, c11, 1		@ clean L2 D entry
-1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate L1 D entry
-	mcr	p15, 1, r0, c7, c7, 1		@ Invalidate L2 D cache line
+	mcrne	p15, 0, r1, c7, c10, 1		@ clean L1 D line
+	mcrne	p15, 1, r1, c7, c11, 1		@ clean L2 line
+1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate L1 D line
+	mcr	p15, 1, r0, c7, c7, 1		@ invalidate L2 line
 	add	r0, r0, #CACHELINESIZE
 	cmp	r0, r1
 	blo	1b
-	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write Buffer
+	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
 	mov	pc, lr
 
 /*
@@ -289,12 +287,12 @@ ENTRY(xsc3_dma_inv_range)
  */
 ENTRY(xsc3_dma_clean_range)
 	bic	r0, r0, #CACHELINESIZE - 1
-1:	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D entry
-	mcr	p15, 1, r0, c7, c11, 1		@ clean L2 D entry
+1:	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D line
+	mcr	p15, 1, r0, c7, c11, 1		@ clean L2 line
 	add	r0, r0, #CACHELINESIZE
 	cmp	r0, r1
 	blo	1b
-	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write Buffer
+	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
 	mov	pc, lr
 
 /*
@@ -307,13 +305,13 @@ ENTRY(xsc3_dma_clean_range)
  */
 ENTRY(xsc3_dma_flush_range)
 	bic	r0, r0, #CACHELINESIZE - 1
-1:	mcr	p15, 0, r0, c7, c14, 1		@ Clean/invalidate L1 D cache line
-	mcr	p15, 1, r0, c7, c11, 1		@ Clean L2 D cache line
-	mcr	p15, 1, r0, c7, c7, 1		@ Invalidate L2 D cache line
+1:	mcr	p15, 0, r0, c7, c14, 1		@ clean/invalidate L1 D line
+	mcr	p15, 1, r0, c7, c11, 1		@ clean L2 line
+	mcr	p15, 1, r0, c7, c7, 1		@ invalidate L2 line
 	add	r0, r0, #CACHELINESIZE
 	cmp	r0, r1
 	blo	1b
-	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write Buffer
+	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
 	mov	pc, lr
 
 ENTRY(xsc3_cache_fns)
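For reference, these are the four by-address (MVA) cache operations that the
three DMA routines above combine, all taken from the patch; note that the L2
operations use opcode_1 = 1 where the L1 ones use 0:

	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D line
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate L1 D line
	mcr	p15, 1, r0, c7, c11, 1		@ clean L2 line
	mcr	p15, 1, r0, c7, c7, 1		@ invalidate L2 line

dma_clean_range uses only the clean pair, dma_inv_range cleans any partial
lines at the range ends before invalidating, and dma_flush_range performs
clean-and-invalidate at both cache levels.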
@@ -328,7 +326,7 @@ ENTRY(xsc3_cache_fns)
 	.long	xsc3_dma_flush_range
 
 ENTRY(cpu_xsc3_dcache_clean_area)
-1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
+1:	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D line
 	add	r0, r0, #CACHELINESIZE
 	subs	r1, r1, #CACHELINESIZE
 	bhi	1b
@@ -346,14 +344,14 @@ ENTRY(cpu_xsc3_dcache_clean_area)
 	.align	5
 ENTRY(cpu_xsc3_switch_mm)
 	clean_d_cache r1, r2
-	mcr	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
-	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write Buffer
-	mcr	p15, 0, ip, c7, c5, 4		@ Prefetch Flush
+	mcr	p15, 0, ip, c7, c5, 0		@ invalidate L1 I cache and BTB
+	mcr	p15, 0, ip, c7, c10, 4		@ data write barrier
+	mcr	p15, 0, ip, c7, c5, 4		@ prefetch flush
 #ifdef L2_CACHE_ENABLE
 	orr	r0, r0, #0x18			@ cache the page table in L2
 #endif
 	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
-	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
+	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I and D TLBs
 	cpwait_ret lr, ip
 
 /*
@@ -366,34 +364,34 @@ ENTRY(cpu_xsc3_switch_mm)
 ENTRY(cpu_xsc3_set_pte_ext)
 	str	r1, [r0], #-2048		@ linux version
 
-	bic	r2, r1, #0xff0			@ Keep C, B bits
+	bic	r2, r1, #0xff0			@ keep C, B bits
 	orr	r2, r2, #PTE_TYPE_EXT		@ extended page
-	tst	r1, #L_PTE_SHARED		@ Shared?
+	tst	r1, #L_PTE_SHARED		@ shared?
 	orrne	r2, r2, #0x200
 
 	eor	r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
 
-	tst	r3, #L_PTE_USER			@ User?
+	tst	r3, #L_PTE_USER			@ user?
 	orrne	r2, r2, #PTE_EXT_AP_URO_SRW	@ yes -> user r/o, system r/w
 
-	tst	r3, #L_PTE_WRITE | L_PTE_DIRTY	@ Write and Dirty?
+	tst	r3, #L_PTE_WRITE | L_PTE_DIRTY	@ write and dirty?
 	orreq	r2, r2, #PTE_EXT_AP_UNO_SRW	@ yes -> user n/a, system r/w
 						@ combined with user -> user r/w
 
 #if L2_CACHE_ENABLE
-	@ If its cacheable it needs to be in L2 also.
+	@ If it's cacheable, it needs to be in L2 also.
 	eor	ip, r1, #L_PTE_CACHEABLE
 	tst	ip, #L_PTE_CACHEABLE
 	orreq	r2, r2, #PTE_EXT_TEX(0x5)
 #endif
 
-	tst	r3, #L_PTE_PRESENT | L_PTE_YOUNG	@ Present and Young?
+	tst	r3, #L_PTE_PRESENT | L_PTE_YOUNG	@ present and young?
 	movne	r2, #0				@ no -> fault
 
 	str	r2, [r0]			@ hardware version
 	mov	ip, #0
-	mcr	p15, 0, r0, c7, c10, 1		@ Clean D cache line mcr
-	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write Buffer
+	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D line
+	mcr	p15, 0, ip, c7, c10, 4		@ data write barrier
 	mov	pc, lr
 
 	.ltorg
@@ -406,17 +404,18 @@ ENTRY(cpu_xsc3_set_pte_ext)
 __xsc3_setup:
 	mov	r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
 	msr	cpsr_c, r0
-	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I, D caches & BTB
-	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write Buffer
-	mcr	p15, 0, ip, c7, c5, 4		@ Prefetch Flush
-	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I, D TLBs
+	mcr	p15, 0, ip, c7, c7, 0		@ invalidate L1 caches and BTB
+	mcr	p15, 0, ip, c7, c10, 4		@ data write barrier
+	mcr	p15, 0, ip, c7, c5, 4		@ prefetch flush
+	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I and D TLBs
 #if L2_CACHE_ENABLE
 	orr	r4, r4, #0x18			@ cache the page table in L2
 #endif
 	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
-	mov	r0, #1				@ Allow access to CP0 and CP13
-	orr	r0, r0, #1 << 13		@ Its undefined whether this
-	mcr	p15, 0, r0, c15, c1, 0		@ affects USR or SVC modes
+
+	mov	r0, #0				@ don't allow CP access
+	mcr	p15, 0, r0, c15, c1, 0		@ write CP access register
+
 	mrc	p15, 0, r0, c1, c0, 1		@ get auxiliary control reg
 	and	r0, r0, #2			@ preserve bit P bit setting
 #if L2_CACHE_ENABLE
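The coprocessor access change above drops the boot-time grant: each bit n of
the CP access register (p15, c15, c1, 0) gates coprocessor n, so the old
code enabled CP0 and CP13 unconditionally, while the new code leaves them
all disabled and lets coprocessor use trap until the lazy-enable support
(the previously merged patch the commit message refers to) switches them on.
Side by side, as a sketch built from the two versions in the hunk:

	@ before: grant CP0 and CP13 at boot
	mov	r0, #1				@ bit 0 -> CP0
	orr	r0, r0, #1 << 13		@ bit 13 -> CP13
	mcr	p15, 0, r0, c15, c1, 0
	@ after: no CP access until explicitly enabled
	mov	r0, #0
	mcr	p15, 0, r0, c15, c1, 0		@ write CP access register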
@@ -427,9 +426,9 @@ __xsc3_setup:
 	adr	r5, xsc3_crval
 	ldmia	r5, {r5, r6}
 	mrc	p15, 0, r0, c1, c0, 0		@ get control register
-	bic	r0, r0, r5			@ .... .... .... ..A.
-	orr	r0, r0, r6			@ .... .... .... .C.M
-	orr	r0, r0, #0x00000800		@ ..VI Z..S .... ....
+	bic	r0, r0, r5			@ ..V. ..R. .... ..A.
+	orr	r0, r0, r6			@ ..VI Z..S .... .C.M (mmu)
+						@ ...I Z..S .... .... (uc)
 #if L2_CACHE_ENABLE
 	orr	r0, r0, #0x04000000		@ L2 enable
 #endif
@@ -439,7 +438,7 @@ __xsc3_setup:
 
 	.type	xsc3_crval, #object
 xsc3_crval:
-	crval	clear=0x04003b02, mmuset=0x00003105, ucset=0x00001100
+	crval	clear=0x04002202, mmuset=0x00003905, ucset=0x00001900
 
 	__INITDATA
 
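The new crval masks line up with the bit letters used in the control
register comments above; decoding them (this mapping is an interpretation of
those comments, with bit 26 being the xsc3 L2 enable):

	@ clear  = 0x04002202: bit 26 (L2), V (13), R (9), A (1)
	@ mmuset = 0x00003905: V (13), I (12), Z (11), S (8), C (2), M (0)
	@ ucset  = 0x00001900: I (12), Z (11), S (8)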
@@ -474,7 +473,7 @@ cpu_elf_name:
 
 	.type	cpu_xsc3_name, #object
 cpu_xsc3_name:
-	.asciz	"XScale-Core3"
+	.asciz	"XScale-V3 based processor"
 	.size	cpu_xsc3_name, . - cpu_xsc3_name
 
 	.align
@@ -490,7 +489,7 @@ __xsc3_proc_info:
 		PMD_SECT_CACHEABLE | \
 		PMD_SECT_AP_WRITE | \
 		PMD_SECT_AP_READ
-	.long   PMD_TYPE_SECT | \
+	.long	PMD_TYPE_SECT | \
 		PMD_SECT_AP_WRITE | \
 		PMD_SECT_AP_READ
 	b	__xsc3_setup