author    Lennert Buytenhek <buytenh@wantstofly.org>  2006-03-28 15:00:40 -0500
committer Russell King <rmk+kernel@arm.linux.org.uk>  2006-03-28 15:00:40 -0500
commit    23bdf86aa06ebe71bcbf6b7d25de9958c6ab33fa (patch)
tree      56636558e8cdeee0739e7d8c82d66ffe625340b3 /arch/arm
parent    de4533a04eb4f66dbef71f59a9c118256b886823 (diff)
[ARM] 3377/2: add support for intel xsc3 core
Patch from Lennert Buytenhek

This patch adds support for the new XScale v3 core. This is an ARMv5 ISA
core with the following additions:

- L2 cache
- I/O coherency support (on select chipsets)
- Low-Locality Reference cache attributes (replaces mini-cache)
- Supersections (v6 compatible)
- 36-bit addressing (v6 compatible)
- Single instruction cache line clean/invalidate
- LRU cache replacement (vs round-robin)

I attempted to merge the XSC3 support into proc-xscale.S, but XSC3 cores
have separate errata and have to handle things like L2, so it is simpler
to keep it separate.

L2 cache support is currently a build option because the L2 enable bit
must be set before we enable the MMU and there is no easy way to capture
command line parameters at this point.

There are still optimizations that can be done, such as using LLR for
copypage (in theory using the existing mini-cache code), but those can be
addressed down the road.

Signed-off-by: Deepak Saxena <dsaxena@plexity.net>
Signed-off-by: Lennert Buytenhek <buytenh@wantstofly.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
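Note that the build option referred to above is not a Kconfig entry in this
patch; it appears further down in this diff as a compile-time constant near
the top of proc-xsc3.S, so toggling L2 support means editing the source and
rebuilding:

	#define L2_CACHE_ENABLE	1	/* must be decided before the MMU is enabled */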
Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/Makefile            |   1 +
-rw-r--r--  arch/arm/mm/Kconfig          |  19 ++
-rw-r--r--  arch/arm/mm/Makefile         |   2 +
-rw-r--r--  arch/arm/mm/copypage-xsc3.S  |  97 ++++++
-rw-r--r--  arch/arm/mm/mm-armv.c        |   3 +-
-rw-r--r--  arch/arm/mm/proc-xsc3.S      | 498 ++++++++++++
6 files changed, 618 insertions(+), 2 deletions(-)
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 99c0d323719a..0f571d3d2fa4 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -57,6 +57,7 @@ tune-$(CONFIG_CPU_ARM926T) :=-mtune=arm9tdmi
 tune-$(CONFIG_CPU_SA110)	:=-mtune=strongarm110
 tune-$(CONFIG_CPU_SA1100)	:=-mtune=strongarm1100
 tune-$(CONFIG_CPU_XSCALE)	:=$(call cc-option,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale
+tune-$(CONFIG_CPU_XSC3)	:=$(call cc-option,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale
 tune-$(CONFIG_CPU_V6)	:=$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm)
 
 ifeq ($(CONFIG_AEABI),y)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index e680c5fd93b5..c55b739e10ba 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -239,6 +239,17 @@ config CPU_XSCALE
 	select CPU_CACHE_VIVT
 	select CPU_TLB_V4WBI
 
+# XScale Core Version 3
+config CPU_XSC3
+	bool
+	depends on ARCH_IXP23XX
+	default y
+	select CPU_32v5
+	select CPU_ABRT_EV5T
+	select CPU_CACHE_VIVT
+	select CPU_TLB_V4WBI
+	select IO_36
+
 # ARMv6
 config CPU_V6
 	bool "Support ARM V6 processor"
@@ -361,11 +372,17 @@ config CPU_TLB_V4WBI
 config CPU_TLB_V6
 	bool
 
+#
+# CPU supports 36-bit I/O
+#
+config IO_36
+	bool
+
 comment "Processor Features"
 
 config ARM_THUMB
 	bool "Support Thumb user binaries"
-	depends on CPU_ARM720T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_V6
+	depends on CPU_ARM720T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_V6
 	default y
 	help
 	  Say Y if you want to include kernel support for running user space
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index ffe73ba2bf17..07a538505784 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_CPU_COPY_V4WB) += copypage-v4wb.o
 obj-$(CONFIG_CPU_COPY_V6)	+= copypage-v6.o mmu.o
 obj-$(CONFIG_CPU_SA1100)	+= copypage-v4mc.o
 obj-$(CONFIG_CPU_XSCALE)	+= copypage-xscale.o
+obj-$(CONFIG_CPU_XSC3)		+= copypage-xsc3.o
 
 obj-$(CONFIG_CPU_TLB_V3)	+= tlb-v3.o
 obj-$(CONFIG_CPU_TLB_V4WT)	+= tlb-v4.o
@@ -51,4 +52,5 @@ obj-$(CONFIG_CPU_ARM1026) += proc-arm1026.o
 obj-$(CONFIG_CPU_SA110)		+= proc-sa110.o
 obj-$(CONFIG_CPU_SA1100)	+= proc-sa1100.o
 obj-$(CONFIG_CPU_XSCALE)	+= proc-xscale.o
+obj-$(CONFIG_CPU_XSC3)		+= proc-xsc3.o
 obj-$(CONFIG_CPU_V6)		+= proc-v6.o
diff --git a/arch/arm/mm/copypage-xsc3.S b/arch/arm/mm/copypage-xsc3.S
new file mode 100644
index 000000000000..9a2cb4332b4c
--- /dev/null
+++ b/arch/arm/mm/copypage-xsc3.S
@@ -0,0 +1,97 @@
/*
 * linux/arch/arm/mm/copypage-xsc3.S
 *
 * Copyright (C) 2004 Intel Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Adapted for 3rd gen XScale core, no more mini-dcache
 * Author: Matt Gilbert (matthew.m.gilbert@intel.com)
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/asm-offsets.h>

/*
 * General note:
 * We don't really want write-allocate cache behaviour for these functions
 * since that will just eat through 8K of the cache.
 */

	.text
	.align	5
/*
 * XSC3 optimised copy_user_page
 *  r0 = destination
 *  r1 = source
 *  r2 = virtual user address of ultimate destination page
 *
 * The source page may have some clean entries in the cache already, but we
 * can safely ignore them - break_cow() will flush them out of the cache
 * if we eventually end up using our copied page.
 */
ENTRY(xsc3_mc_copy_user_page)
	stmfd	sp!, {r4, r5, lr}
	mov	lr, #PAGE_SZ/64-1

	pld	[r1, #0]
	pld	[r1, #32]
1:	pld	[r1, #64]
	pld	[r1, #96]

2:	ldrd	r2, [r1], #8
	mov	ip, r0
	ldrd	r4, [r1], #8
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate
	strd	r2, [r0], #8
	ldrd	r2, [r1], #8
	strd	r4, [r0], #8
	ldrd	r4, [r1], #8
	strd	r2, [r0], #8
	strd	r4, [r0], #8
	ldrd	r2, [r1], #8
	mov	ip, r0
	ldrd	r4, [r1], #8
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate
	strd	r2, [r0], #8
	ldrd	r2, [r1], #8
	subs	lr, lr, #1
	strd	r4, [r0], #8
	ldrd	r4, [r1], #8
	strd	r2, [r0], #8
	strd	r4, [r0], #8
	bgt	1b
	beq	2b

	ldmfd	sp!, {r4, r5, pc}

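A rough C rendering of the copy loop above, to make the trick explicit
(helper names here are illustrative, not kernel API): each 32-byte
destination line is invalidated immediately before being completely
overwritten, so the line is allocated without fetching stale data from
memory and no dirty destination data is ever written back.

	/*
	 * Sketch only: the unrolled ldrd/strd body actually moves 64 bytes
	 * per iteration; shown here per 32-byte cache line.
	 */
	void xsc3_copy_page_sketch(u64 *dst, const u64 *src)
	{
		int line;

		for (line = 0; line < PAGE_SIZE / 32; line++) {
			invalidate_dcache_line(dst);	/* mcr p15, 0, ..., c7, c6, 1 */
			dst[0] = src[0];		/* four ldrd/strd pairs */
			dst[1] = src[1];
			dst[2] = src[2];
			dst[3] = src[3];
			dst += 4;
			src += 4;
		}
	}
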
	.align	5
/*
 * XSC3 optimised clear_user_page
 *  r0 = destination
 *  r1 = virtual user address of ultimate destination page
 */
ENTRY(xsc3_mc_clear_user_page)
	mov	r1, #PAGE_SZ/32
	mov	r2, #0
	mov	r3, #0
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate line
	strd	r2, [r0], #8
	strd	r2, [r0], #8
	strd	r2, [r0], #8
	strd	r2, [r0], #8
	subs	r1, r1, #1
	bne	1b
	mov	pc, lr

	__INITDATA

	.type	xsc3_mc_user_fns, #object
ENTRY(xsc3_mc_user_fns)
	.long	xsc3_mc_clear_user_page
	.long	xsc3_mc_copy_user_page
	.size	xsc3_mc_user_fns, . - xsc3_mc_user_fns
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index ef8d30a185a9..5e5d05bcad50 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -557,7 +557,8 @@ void __init create_mapping(struct map_desc *md)
 	 * supersections are only allocated for domain 0 regardless
 	 * of the actual domain assignments in use.
 	 */
-	if (cpu_architecture() >= CPU_ARCH_ARMv6 && domain == 0) {
+	if ((cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())
+	    && domain == 0) {
 		/*
 		 * Align to supersection boundary if !high pages.
 		 * High pages have already been checked for proper
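For illustration, a hypothetical static mapping that this relaxed check
could now satisfy with a supersection on XSC3 (names and addresses are
invented; supersections are 16 MB mappings, and the 36-bit extension is
what allows a physical address above 4 GB):

	static struct map_desc example_io_desc __initdata = {
		.virtual	= 0xfc000000,			/* 16 MB aligned VA */
		.pfn		= __phys_to_pfn(0x100000000ULL),/* PA above 4 GB */
		.length		= SZ_16M,
		.type		= MT_DEVICE
	};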
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
new file mode 100644
index 000000000000..f90513e9af0c
--- /dev/null
+++ b/arch/arm/mm/proc-xsc3.S
@@ -0,0 +1,498 @@
/*
 * linux/arch/arm/mm/proc-xsc3.S
 *
 * Original Author: Matthew Gilbert
 * Current Maintainer: Deepak Saxena <dsaxena@plexity.net>
 *
 * Copyright 2004 (C) Intel Corp.
 * Copyright 2005 (C) MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * MMU functions for the Intel XScale3 Core (XSC3).  The XSC3 core is an
 * extension to Intel's original XScale core that adds the following
 * features:
 *
 * - ARMv6 Supersections
 * - Low Locality Reference pages (replaces mini-cache)
 * - 36-bit addressing
 * - L2 cache
 * - Cache-coherency if chipset supports it
 *
 * Based on original XScale code by Nicolas Pitre
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/procinfo.h>
#include <asm/hardware.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be flushed.  If the
 * area is larger than this, then we flush the whole cache.
 */
#define MAX_AREA_SIZE	32768

/*
 * The cache line size of the I and D cache.
 */
#define CACHELINESIZE	32

/*
 * The size of the data cache.
 */
#define CACHESIZE	32768

/*
 * Run with L2 enabled.
 */
#define L2_CACHE_ENABLE	1

/*
 * Enable the Branch Target Buffer (can cause crashes, see erratum #42.)
 */
#define BTB_ENABLE	0

/*
 * This macro is used to wait for a CP15 write to take effect before
 * returning: it issues an arbitrary CP15 read into \rd, then computes
 * pc = \lr - (\rd LSR #32).  A logical shift right by 32 always yields
 * zero, so the branch target is simply \lr, but the data dependency on
 * \rd stalls the pipeline until the coprocessor operation completes.
 */
	.macro	cpwait_ret, lr, rd
	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
	sub	pc, \lr, \rd, LSR #32		@ wait for completion and
						@ flush instruction pipeline
	.endm

/*
 * This macro cleans and invalidates the entire xsc3 dcache by set/way.
 * The 32 KB D-cache is 4-way set associative with 256 sets of 32-byte
 * lines: the way index travels in bits [31:30] of the c7, c14, 2
 * operand and the set index in bits [12:5].
 */
	.macro  clean_d_cache rd, rs
	mov	\rd, #0x1f00
	orr	\rd, \rd, #0x00e0
1:	mcr	p15, 0, \rd, c7, c14, 2		@ clean/inv set/way
	adds	\rd, \rd, #0x40000000
	bcc	1b
	subs	\rd, \rd, #0x20
	bpl	1b
	.endm
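
In C terms, the set/way walk above amounts to the following (sketch with a
hypothetical helper; the adds #0x40000000 carries out after the fourth way
and the subs #0x20 steps to the next set):

	void clean_d_cache_sketch(void)
	{
		int set, way;

		for (set = 0xff; set >= 0; set--)	/* subs #0x20 / bpl  */
			for (way = 0; way < 4; way++)	/* adds #0x40000000 / bcc */
				clean_inv_dcache_setway((way << 30) | (set << 5));
	}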

	.text

/*
 * cpu_xsc3_proc_init()
 *
 * Nothing too exciting at the moment
 */
ENTRY(cpu_xsc3_proc_init)
	mov	pc, lr

/*
 * cpu_xsc3_proc_fin()
 */
ENTRY(cpu_xsc3_proc_fin)
	str	lr, [sp, #-4]!
	mov	r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r0
	bl	xsc3_flush_kern_cache_all	@ clean caches
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1800			@ ...IZ...........
	bic	r0, r0, #0x0006			@ .............CA.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ldr	pc, [sp], #4

/*
 * cpu_xsc3_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align	5
ENTRY(cpu_xsc3_reset)
	mov	r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r1			@ reset CPSR
	mrc	p15, 0, r1, c1, c0, 0		@ ctrl register
	bic	r1, r1, #0x0086			@ ........B....CA.
	bic	r1, r1, #0x3900			@ ..VIZ..S........
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches & BTB
	bic	r1, r1, #0x0001			@ ...............M
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	@ CAUTION: MMU turned off from this point.  We count on the pipeline
	@ already containing those two last instructions to survive.
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	mov	pc, r0

/*
 * cpu_xsc3_do_idle()
 *
 * Cause the processor to idle
 *
 * For now we do nothing but go to idle mode for every case
 *
 * XScale supports clock switching, but using idle mode support
 * allows external hardware to react to system state changes.
 *
 * MMG: Come back to this one.
 */
	.align	5

ENTRY(cpu_xsc3_do_idle)
	mov	r0, #1
	mcr	p14, 0, r0, c7, c0, 0		@ Go to IDLE
	mov	pc, lr

/* ================================= CACHE ================================ */

/*
 * flush_user_cache_all()
 *
 * Invalidate all cache entries in a particular address
 * space.
 */
ENTRY(xsc3_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(xsc3_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	clean_d_cache r0, r1
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ Drain Write Buffer
	mcrne	p15, 0, ip, c7, c5, 4		@ Prefetch Flush
	mov	pc, lr

/*
 * flush_user_cache_range(start, end, vm_flags)
 *
 * Invalidate a range of cache entries in the specified
 * address space.
 *
 * - start    - start address (may not be aligned)
 * - end      - end address (exclusive, may not be aligned)
 * - vm_flags - vma->vm_flags describing address space
 */
	.align	5
ENTRY(xsc3_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #MAX_AREA_SIZE
	bhs	__flush_whole_cache

1:	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ Invalidate I cache line
	mcr	p15, 0, r0, c7, c14, 1		@ Clean/invalidate D cache line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 6		@ Invalidate BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ Drain Write Buffer
	mcrne	p15, 0, ip, c7, c5, 4		@ Prefetch Flush
	mov	pc, lr

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 *
 * Note: single I-cache line invalidation isn't used here since
 * it also trashes the mini I-cache used by JTAG debuggers.
 */
ENTRY(xsc3_coherent_kern_range)
/* FALLTHROUGH */
ENTRY(xsc3_coherent_user_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write Buffer
	mcr	p15, 0, r0, c7, c5, 4		@ Prefetch Flush
	mov	pc, lr

/*
 * flush_kern_dcache_page(void *page)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache
 *
 * - addr - page aligned address
 */
ENTRY(xsc3_flush_kern_dcache_page)
	add	r1, r0, #PAGE_SZ
1:	mcr	p15, 0, r0, c7, c14, 1		@ Clean/Invalidate D Cache line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write Buffer
	mcr	p15, 0, r0, c7, c5, 4		@ Prefetch Flush
	mov	pc, lr

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(xsc3_dma_inv_range)
	tst	r0, #CACHELINESIZE - 1
	bic	r0, r0, #CACHELINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean L1 D entry
	mcrne	p15, 1, r0, c7, c11, 1		@ clean L2 D entry
	tst	r1, #CACHELINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean L1 D entry
	mcrne	p15, 1, r1, c7, c11, 1		@ clean L2 D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate L1 D entry
	mcr	p15, 1, r0, c7, c7, 1		@ Invalidate L2 D cache line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write Buffer
	mov	pc, lr

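The boundary handling above, restated as a C sketch (helper names are
invented): lines only partially covered by [start, end) are cleaned first,
so dirty data sharing those lines is not thrown away when the whole range
is then invalidated in both L1 and L2.

	void dma_inv_range_sketch(unsigned long start, unsigned long end)
	{
		unsigned long addr;

		if (start & (CACHELINESIZE - 1))
			clean_dcache_line(start);	/* L1 + L2 clean */
		if (end & (CACHELINESIZE - 1))
			clean_dcache_line(end);		/* L1 + L2 clean */
		for (addr = start & ~(CACHELINESIZE - 1); addr < end;
		     addr += CACHELINESIZE)
			inv_dcache_line(addr);		/* L1 + L2 invalidate */
		drain_write_buffer();
	}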

/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(xsc3_dma_clean_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D entry
	mcr	p15, 1, r0, c7, c11, 1		@ clean L2 D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write Buffer
	mov	pc, lr

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(xsc3_dma_flush_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c14, 1		@ Clean/invalidate L1 D cache line
	mcr	p15, 1, r0, c7, c11, 1		@ Clean L2 D cache line
	mcr	p15, 1, r0, c7, c7, 1		@ Invalidate L2 D cache line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write Buffer
	mov	pc, lr

ENTRY(xsc3_cache_fns)
	.long	xsc3_flush_kern_cache_all
	.long	xsc3_flush_user_cache_all
	.long	xsc3_flush_user_cache_range
	.long	xsc3_coherent_kern_range
	.long	xsc3_coherent_user_range
	.long	xsc3_flush_kern_dcache_page
	.long	xsc3_dma_inv_range
	.long	xsc3_dma_clean_range
	.long	xsc3_dma_flush_range

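Order is what binds these entries: the table must line up field for field
with struct cpu_cache_fns in include/asm-arm/cacheflush.h. As a reading
aid, the layout of that struct in kernels of this vintage was, as best I
recall:

	struct cpu_cache_fns {
		void (*flush_kern_all)(void);
		void (*flush_user_all)(void);
		void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
		void (*coherent_kern_range)(unsigned long, unsigned long);
		void (*coherent_user_range)(unsigned long, unsigned long);
		void (*flush_kern_dcache_page)(void *);
		void (*dma_inv_range)(unsigned long, unsigned long);
		void (*dma_clean_range)(unsigned long, unsigned long);
		void (*dma_flush_range)(unsigned long, unsigned long);
	};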

ENTRY(cpu_xsc3_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	subs	r1, r1, #CACHELINESIZE
	bhi	1b
	mov	pc, lr

/* =============================== PageTable ============================== */

/*
 * cpu_xsc3_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_xsc3_switch_mm)
	clean_d_cache r1, r2
	mcr	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write Buffer
	mcr	p15, 0, ip, c7, c5, 4		@ Prefetch Flush
#if L2_CACHE_ENABLE
	orr	r0, r0, #0x18			@ cache the page table in L2
#endif
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	cpwait_ret lr, ip

/*
 * cpu_xsc3_set_pte(ptep, pte)
 *
 * Set a PTE and flush it out
 */
	.align	5
ENTRY(cpu_xsc3_set_pte)
	str	r1, [r0], #-2048		@ linux version

	bic	r2, r1, #0xff0
	orr	r2, r2, #PTE_TYPE_EXT		@ extended page

	eor	r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY

	tst	r3, #L_PTE_USER			@ User?
	orrne	r2, r2, #PTE_EXT_AP_URO_SRW	@ yes -> user r/o, system r/w

	tst	r3, #L_PTE_WRITE | L_PTE_DIRTY	@ Write and Dirty?
	orreq	r2, r2, #PTE_EXT_AP_UNO_SRW	@ yes -> user n/a, system r/w
						@ combined with user -> user r/w

#if L2_CACHE_ENABLE
	@ If it's cacheable, it needs to be in L2 also.
	eor	ip, r1, #L_PTE_CACHEABLE
	tst	ip, #L_PTE_CACHEABLE
	orreq	r2, r2, #PTE_EXT_TEX(0x5)
#endif

	tst	r3, #L_PTE_PRESENT | L_PTE_YOUNG	@ Present and Young?
	movne	r2, #0					@ no -> fault

	str	r2, [r0]			@ hardware version
	mov	ip, #0
	mcr	p15, 0, r0, c7, c10, 1		@ Clean D cache line
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write Buffer
	mov	pc, lr

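The eor/tst sequence above is compact; a C sketch of the same permission
derivation may help (constants as used in the assembly, function wrapper
invented; the eor inverts PRESENT/YOUNG/WRITE/DIRTY so the tst instructions
test "bit set in pte" or "bit clear in pte" respectively):

	unsigned long xsc3_hw_pte_sketch(unsigned long pte)
	{
		unsigned long hw = (pte & ~0xff0) | PTE_TYPE_EXT;

		if (pte & L_PTE_USER)
			hw |= PTE_EXT_AP_URO_SRW;	/* user r/o, kernel r/w */
		if ((pte & (L_PTE_WRITE | L_PTE_DIRTY)) ==
		    (L_PTE_WRITE | L_PTE_DIRTY))
			hw |= PTE_EXT_AP_UNO_SRW;	/* with USER: user r/w */
		if (pte & L_PTE_CACHEABLE)
			hw |= PTE_EXT_TEX(0x5);		/* L2-cacheable, if L2 on */
		if ((pte & (L_PTE_PRESENT | L_PTE_YOUNG)) !=
		    (L_PTE_PRESENT | L_PTE_YOUNG))
			hw = 0;				/* not present/young: fault */
		return hw;
	}
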
	.ltorg

	.align

	__INIT

	.type	__xsc3_setup, #function
__xsc3_setup:
	mov	r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I, D caches & BTB
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write Buffer
	mcr	p15, 0, ip, c7, c5, 4		@ Prefetch Flush
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I, D TLBs
#if L2_CACHE_ENABLE
	orr	r4, r4, #0x18			@ cache the page table in L2
#endif
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
	mov	r0, #1				@ Allow access to CP0 and CP13
	orr	r0, r0, #1 << 13		@ It's undefined whether this
	mcr	p15, 0, r0, c15, c1, 0		@ affects USR or SVC modes
	mrc	p15, 0, r0, c1, c0, 1		@ get auxiliary control reg
	and	r0, r0, #2			@ preserve the P bit setting
#if L2_CACHE_ENABLE
	orr	r0, r0, #(1 << 10)		@ enable L2 for LLR cache
#endif
	mcr	p15, 0, r0, c1, c0, 1		@ set auxiliary control reg
	mrc	p15, 0, r0, c1, c0, 0		@ get control register
	bic	r0, r0, #0x0200			@ .... ..R. .... ....
	bic	r0, r0, #0x0002			@ .... .... .... ..A.
	orr	r0, r0, #0x0005			@ .... .... .... .C.M
#if BTB_ENABLE
	orr	r0, r0, #0x3900			@ ..VI Z..S .... ....
#else
	orr	r0, r0, #0x3100			@ ..VI ...S .... ....
#endif
#if L2_CACHE_ENABLE
	orr	r0, r0, #0x4000000		@ L2 enable
#endif
	mov	pc, lr

	.size	__xsc3_setup, . - __xsc3_setup
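
For reference, the CP15 control register bits toggled by __xsc3_setup,
decoded from the masks above (standard ARM register 1 bit names):

	#define CR_M	(1 << 0)	/* MMU enable:          orr #0x0005 */
	#define CR_A	(1 << 1)	/* alignment checking:  bic #0x0002 */
	#define CR_C	(1 << 2)	/* D-cache enable:      orr #0x0005 */
	#define CR_S	(1 << 8)	/* system protection:   in #0x3100  */
	#define CR_R	(1 << 9)	/* ROM protection:      bic #0x0200 */
	#define CR_Z	(1 << 11)	/* BTB enable:          in #0x3900 only */
	#define CR_I	(1 << 12)	/* I-cache enable:      in #0x3100  */
	#define CR_V	(1 << 13)	/* high vectors:        in #0x3100  */
	#define CR_L2	(1 << 26)	/* L2 enable (XSC3):    orr #0x4000000 */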

	__INITDATA

/*
 * Purpose : Function pointers used to access above functions - all calls
 *	     come through these
 */

	.type	xsc3_processor_functions, #object
ENTRY(xsc3_processor_functions)
	.word	v5t_early_abort
	.word	cpu_xsc3_proc_init
	.word	cpu_xsc3_proc_fin
	.word	cpu_xsc3_reset
	.word	cpu_xsc3_do_idle
	.word	cpu_xsc3_dcache_clean_area
	.word	cpu_xsc3_switch_mm
	.word	cpu_xsc3_set_pte
	.size	xsc3_processor_functions, . - xsc3_processor_functions

	.section ".rodata"

	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv5te"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v5"
	.size	cpu_elf_name, . - cpu_elf_name

	.type	cpu_xsc3_name, #object
cpu_xsc3_name:
	.asciz	"XScale-Core3"
	.size	cpu_xsc3_name, . - cpu_xsc3_name

	.align

	.section ".proc.info.init", #alloc, #execinstr

	.type	__xsc3_proc_info,#object
__xsc3_proc_info:
	.long	0x69056000
	.long	0xffffe000
	.long	0x00000c0e
	b	__xsc3_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_xsc3_name
	.long	xsc3_processor_functions
	.long	v4wbi_tlb_fns
	.long	xsc3_mc_user_fns
	.long	xsc3_cache_fns
	.size	__xsc3_proc_info, . - __xsc3_proc_info
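
The first two words of this record are the CP15 main ID value and mask used
to bind a booting CPU to this entry; in C terms the probe amounts to the
following (sketch, names invented):

	#define XSC3_CPUID_VALUE	0x69056000
	#define XSC3_CPUID_MASK		0xffffe000

	static inline int id_matches_xsc3(unsigned int midr)
	{
		/* midr is the CP15 c0 main ID register value */
		return (midr & XSC3_CPUID_MASK) == XSC3_CPUID_VALUE;
	}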