author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-09 16:05:57 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-09 16:05:57 -0400
commit		932c37c375cca25175f9b6acee4c75d7a96d985f (patch)
tree		d7ba3620cd9a7a21c2de1bdfc7badd7637ed635e /arch/arm/mm
parent		c855ff3718e5f667b463b20b9de516b4cd7625ad (diff)
parent		805f53f085346b6765eda02820721a14ce0d644f (diff)
Merge branch 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm
* 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm: (28 commits)
  ARM: OMAP: Fix GCC-reported compile time bug
  ARM: OMAP: restore CONFIG_GENERIC_TIME
  ARM: OMAP: partial LED fixes
  ARM: OMAP: add SoSSI clock (call propagate_rate for childrens)
  ARM: OMAP: FB sync with N800 tree (support for dynamic SRAM allocations)
  ARM: OMAP: Sync framebuffer headers with N800 tree
  ARM: OMAP: Mostly cosmetic to sync up with linux-omap tree
  ARM: OMAP: Fix gpmc header
  ARM: OMAP: Add mailbox support for IVA
  [ARM] armv7: add Makefile and Kconfig entries
  [ARM] armv7: add support for asid-tagged VIVT I-cache
  [ARM] armv7: add dedicated ARMv7 barrier instructions
  [ARM] armv7: Add ARMv7 cacheid macros
  [ARM] armv7: add support for ARMv7 cores.
  [ARM] Fix ARM branch relocation range
  [ARM] 4363/1: AT91: Remove legacy PIO definitions
  [ARM] 4361/1: AT91: Build error
  ARM: OMAP: Sync core code with linux-omap
  ARM: OMAP: Sync headers with linux-omap
  ARM: OMAP: h4 must have blinky leds!!
  ...
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig         32
-rw-r--r--  arch/arm/mm/Makefile         3
-rw-r--r--  arch/arm/mm/abort-ev7.S     32
-rw-r--r--  arch/arm/mm/cache-v7.S     253
-rw-r--r--  arch/arm/mm/context.c       17
-rw-r--r--  arch/arm/mm/proc-macros.S   12
-rw-r--r--  arch/arm/mm/proc-v7.S      262
7 files changed, 606 insertions(+), 5 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index e684e9b38216..b81391a4e374 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -366,6 +366,19 @@ config CPU_32v6K
 	  enabled will not boot on processors with do not support these
 	  instructions.
 
+# ARMv7
+config CPU_V7
+	bool "Support ARM V7 processor"
+	depends on ARCH_INTEGRATOR
+	select CPU_32v6K
+	select CPU_32v7
+	select CPU_ABRT_EV7
+	select CPU_CACHE_V7
+	select CPU_CACHE_VIPT
+	select CPU_CP15_MMU
+	select CPU_COPY_V6 if MMU
+	select CPU_TLB_V6 if MMU
+
 # Figure out what processor architecture version we should be using.
 # This defines the compiler instruction set which depends on the machine type.
 config CPU_32v3
@@ -391,6 +404,9 @@ config CPU_32v5
 config CPU_32v6
 	bool
 
+config CPU_32v7
+	bool
+
 # The abort model
 config CPU_ABRT_NOMMU
 	bool
@@ -413,6 +429,9 @@ config CPU_ABRT_EV5TJ
 config CPU_ABRT_EV6
 	bool
 
+config CPU_ABRT_EV7
+	bool
+
 # The cache model
 config CPU_CACHE_V3
 	bool
@@ -429,6 +448,9 @@ config CPU_CACHE_V4WB
 config CPU_CACHE_V6
 	bool
 
+config CPU_CACHE_V7
+	bool
+
 config CPU_CACHE_VIVT
 	bool
 
@@ -503,7 +525,7 @@ comment "Processor Features"
 
 config ARM_THUMB
 	bool "Support Thumb user binaries"
-	depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_V6
+	depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_V6 || CPU_V7
 	default y
 	help
 	  Say Y if you want to include kernel support for running user space
@@ -578,9 +600,15 @@ config CPU_CACHE_ROUND_ROBIN
 	  Say Y here to use the predictable round-robin cache replacement
 	  policy. Unless you specifically require this or are unsure, say N.
 
+config CPU_L2CACHE_DISABLE
+	bool "Disable level 2 cache"
+	depends on CPU_V7
+	help
+	  Say Y here to disable the level 2 cache. If unsure, say N.
+
 config CPU_BPREDICT_DISABLE
 	bool "Disable branch prediction"
-	depends on CPU_ARM1020 || CPU_V6 || CPU_XSC3
+	depends on CPU_ARM1020 || CPU_V6 || CPU_XSC3 || CPU_V7
 	help
 	  Say Y here to disable branch prediction. If unsure, say N.
 
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 2f8b95947774..b5bd335ff14a 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -24,12 +24,14 @@ obj-$(CONFIG_CPU_ABRT_LV4T) += abort-lv4t.o
 obj-$(CONFIG_CPU_ABRT_EV5T)	+= abort-ev5t.o
 obj-$(CONFIG_CPU_ABRT_EV5TJ)	+= abort-ev5tj.o
 obj-$(CONFIG_CPU_ABRT_EV6)	+= abort-ev6.o
+obj-$(CONFIG_CPU_ABRT_EV7)	+= abort-ev7.o
 
 obj-$(CONFIG_CPU_CACHE_V3)	+= cache-v3.o
 obj-$(CONFIG_CPU_CACHE_V4)	+= cache-v4.o
 obj-$(CONFIG_CPU_CACHE_V4WT)	+= cache-v4wt.o
 obj-$(CONFIG_CPU_CACHE_V4WB)	+= cache-v4wb.o
 obj-$(CONFIG_CPU_CACHE_V6)	+= cache-v6.o
+obj-$(CONFIG_CPU_CACHE_V7)	+= cache-v7.o
 
 obj-$(CONFIG_CPU_COPY_V3)	+= copypage-v3.o
 obj-$(CONFIG_CPU_COPY_V4WT)	+= copypage-v4wt.o
@@ -66,5 +68,6 @@ obj-$(CONFIG_CPU_SA1100) += proc-sa1100.o
 obj-$(CONFIG_CPU_XSCALE)	+= proc-xscale.o
 obj-$(CONFIG_CPU_XSC3)		+= proc-xsc3.o
 obj-$(CONFIG_CPU_V6)		+= proc-v6.o
+obj-$(CONFIG_CPU_V7)		+= proc-v7.o
 
 obj-$(CONFIG_CACHE_L2X0)	+= cache-l2x0.o
diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S
new file mode 100644
index 000000000000..eb90bce38e14
--- /dev/null
+++ b/arch/arm/mm/abort-ev7.S
@@ -0,0 +1,32 @@
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+/*
+ * Function: v7_early_abort
+ *
+ * Params  : r2 = address of aborted instruction
+ *         : r3 = saved SPSR
+ *
+ * Returns : r0 = address of abort
+ *         : r1 = FSR, bit 11 = write
+ *         : r2-r8 = corrupted
+ *         : r9 = preserved
+ *         : sp = pointer to registers
+ *
+ * Purpose : obtain information about current aborted instruction.
+ */
+	.align	5
+ENTRY(v7_early_abort)
+	/*
+	 * The effect of data aborts on the exclusive access monitor is
+	 * UNPREDICTABLE. Do a CLREX to clear the state.
+	 */
+	clrex
+
+	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
+	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
+
+	/*
+	 * V6 code adjusts the returned DFSR.
+	 * New designs should not need to patch up faults.
+	 */
+	mov	pc, lr
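The FSR word returned in r1 is what the generic ARM fault code goes on to decode. As a rough C illustration of the layout the comment block above describes (the helper names are this sketch's inventions, and the FS[4:0] split across bit 10 and bits 3:0 is the usual ARMv7 DFSR layout, assumed here rather than taken from the patch):

#include <stdbool.h>
#include <stdint.h>

/* Sketch only: decode the FSR value that v7_early_abort returns in r1.
 * Bit 11 is the write/read flag, as documented above; the FS-field
 * split is an assumption of this sketch, not part of the patch.
 */
static bool fsr_is_write(uint32_t fsr)
{
	return fsr & (1u << 11);		/* bit 11 = write */
}

static uint32_t fsr_fault_status(uint32_t fsr)
{
	return ((fsr >> 6) & 0x10) | (fsr & 0x0f);	/* FS[4] | FS[3:0] */
}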
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
new file mode 100644
index 000000000000..35ffc4d95997
--- /dev/null
+++ b/arch/arm/mm/cache-v7.S
@@ -0,0 +1,253 @@
+/*
+ *  linux/arch/arm/mm/cache-v7.S
+ *
+ *  Copyright (C) 2001 Deep Blue Solutions Ltd.
+ *  Copyright (C) 2005 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  This is the "shell" of the ARMv7 processor support.
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/assembler.h>
+
+#include "proc-macros.S"
+
+/*
+ *	v7_flush_dcache_all()
+ *
+ *	Flush the whole D-cache.
+ *
+ *	Corrupted registers: r0-r5, r7, r9-r11
+ *
+ *	- mm    - mm_struct describing address space
+ */
+ENTRY(v7_flush_dcache_all)
+	mrc	p15, 1, r0, c0, c0, 1		@ read clidr
+	ands	r3, r0, #0x7000000		@ extract loc from clidr
+	mov	r3, r3, lsr #23			@ left align loc bit field
+	beq	finished			@ if loc is 0, then no need to clean
+	mov	r10, #0				@ start clean at cache level 0
+loop1:
+	add	r2, r10, r10, lsr #1		@ work out 3x current cache level
+	mov	r1, r0, lsr r2			@ extract cache type bits from clidr
+	and	r1, r1, #7			@ mask off the bits for current cache only
+	cmp	r1, #2				@ see what cache we have at this level
+	blt	skip				@ skip if no cache, or just i-cache
+	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
+	isb					@ isb to sync the new cssr&csidr
+	mrc	p15, 1, r1, c0, c0, 0		@ read the new csidr
+	and	r2, r1, #7			@ extract the length of the cache lines
+	add	r2, r2, #4			@ add 4 (line length offset)
+	ldr	r4, =0x3ff
+	ands	r4, r4, r1, lsr #3		@ find maximum number on the way size
+	clz	r5, r4				@ find bit position of way size increment
+	ldr	r7, =0x7fff
+	ands	r7, r7, r1, lsr #13		@ extract max number of the index size
+loop2:
+	mov	r9, r4				@ create working copy of max way size
+loop3:
+	orr	r11, r10, r9, lsl r5		@ factor way and cache number into r11
+	orr	r11, r11, r7, lsl r2		@ factor index number into r11
+	mcr	p15, 0, r11, c7, c14, 2		@ clean & invalidate by set/way
+	subs	r9, r9, #1			@ decrement the way
+	bge	loop3
+	subs	r7, r7, #1			@ decrement the index
+	bge	loop2
+skip:
+	add	r10, r10, #2			@ increment cache number
+	cmp	r3, r10
+	bgt	loop1
+finished:
+	mov	r10, #0				@ switch back to cache level 0
+	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
+	isb
+	mov	pc, lr
+
+/*
+ *	v7_flush_kern_cache_all()
+ *
+ *	Flush the entire cache system.
+ *	The data cache flush is now achieved using atomic clean / invalidates
+ *	working outwards from L1 cache. This is done using Set/Way based cache
+ *	maintenance instructions.
+ *	The instruction cache can still be invalidated back to the point of
+ *	unification in a single instruction.
+ *
+ */
+ENTRY(v7_flush_kern_cache_all)
+	stmfd	sp!, {r4-r5, r7, r9-r11, lr}
+	bl	v7_flush_dcache_all
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
+	ldmfd	sp!, {r4-r5, r7, r9-r11, lr}
+	mov	pc, lr
+
+/*
+ *	v7_flush_user_cache_all()
+ *
+ *	Flush all cache entries in a particular address space
+ *
+ *	- mm    - mm_struct describing address space
+ */
+ENTRY(v7_flush_user_cache_all)
+	/*FALLTHROUGH*/
+
+/*
+ *	v7_flush_user_cache_range(start, end, flags)
+ *
+ *	Flush a range of cache entries in the specified address space.
+ *
+ *	- start - start address (may not be aligned)
+ *	- end	- end address (exclusive, may not be aligned)
+ *	- flags	- vm_area_struct flags describing address space
+ *
+ *	It is assumed that:
+ *	- we have a VIPT cache.
+ */
+ENTRY(v7_flush_user_cache_range)
+	mov	pc, lr
+
+/*
+ *	v7_coherent_kern_range(start,end)
+ *
+ *	Ensure that the I and D caches are coherent within specified
+ *	region. This is typically used when code has been written to
+ *	a memory region, and will be executed.
+ *
+ *	- start   - virtual start address of region
+ *	- end     - virtual end address of region
+ *
+ *	It is assumed that:
+ *	- the Icache does not read data from the write buffer
+ */
+ENTRY(v7_coherent_kern_range)
+	/* FALLTHROUGH */
+
+/*
+ *	v7_coherent_user_range(start,end)
+ *
+ *	Ensure that the I and D caches are coherent within specified
+ *	region. This is typically used when code has been written to
+ *	a memory region, and will be executed.
+ *
+ *	- start   - virtual start address of region
+ *	- end     - virtual end address of region
+ *
+ *	It is assumed that:
+ *	- the Icache does not read data from the write buffer
+ */
+ENTRY(v7_coherent_user_range)
+	dcache_line_size r2, r3
+	sub	r3, r2, #1
+	bic	r0, r0, r3
+1:	mcr	p15, 0, r0, c7, c11, 1		@ clean D line to the point of unification
+	dsb
+	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I line
+	add	r0, r0, r2
+	cmp	r0, r1
+	blo	1b
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 6		@ invalidate BTB
+	dsb
+	isb
+	mov	pc, lr
+
+/*
+ *	v7_flush_kern_dcache_page(kaddr)
+ *
+ *	Ensure that the data held in the page kaddr is written back
+ *	to the page in question.
+ *
+ *	- kaddr   - kernel address (guaranteed to be page aligned)
+ */
+ENTRY(v7_flush_kern_dcache_page)
+	dcache_line_size r2, r3
+	add	r1, r0, #PAGE_SZ
+1:
+	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line / unified line
+	add	r0, r0, r2
+	cmp	r0, r1
+	blo	1b
+	dsb
+	mov	pc, lr
+
+/*
+ *	v7_dma_inv_range(start,end)
+ *
+ *	Invalidate the data cache within the specified region; we will
+ *	be performing a DMA operation in this region and we want to
+ *	purge old data in the cache.
+ *
+ *	- start   - virtual start address of region
+ *	- end     - virtual end address of region
+ */
+ENTRY(v7_dma_inv_range)
+	dcache_line_size r2, r3
+	sub	r3, r2, #1
+	tst	r0, r3
+	bic	r0, r0, r3
+	mcrne	p15, 0, r0, c7, c14, 1		@ clean & invalidate D / U line
+
+	tst	r1, r3
+	bic	r1, r1, r3
+	mcrne	p15, 0, r1, c7, c14, 1		@ clean & invalidate D / U line
+1:
+	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D / U line
+	add	r0, r0, r2
+	cmp	r0, r1
+	blo	1b
+	dsb
+	mov	pc, lr
+
+/*
+ *	v7_dma_clean_range(start,end)
+ *	- start   - virtual start address of region
+ *	- end     - virtual end address of region
+ */
+ENTRY(v7_dma_clean_range)
+	dcache_line_size r2, r3
+	sub	r3, r2, #1
+	bic	r0, r0, r3
+1:
+	mcr	p15, 0, r0, c7, c10, 1		@ clean D / U line
+	add	r0, r0, r2
+	cmp	r0, r1
+	blo	1b
+	dsb
+	mov	pc, lr
+
+/*
+ *	v7_dma_flush_range(start,end)
+ *	- start   - virtual start address of region
+ *	- end     - virtual end address of region
+ */
+ENTRY(v7_dma_flush_range)
+	dcache_line_size r2, r3
+	sub	r3, r2, #1
+	bic	r0, r0, r3
+1:
+	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D / U line
+	add	r0, r0, r2
+	cmp	r0, r1
+	blo	1b
+	dsb
+	mov	pc, lr
+
+	__INITDATA
+
+	.type	v7_cache_fns, #object
+ENTRY(v7_cache_fns)
+	.long	v7_flush_kern_cache_all
+	.long	v7_flush_user_cache_all
+	.long	v7_flush_user_cache_range
+	.long	v7_coherent_kern_range
+	.long	v7_coherent_user_range
+	.long	v7_flush_kern_dcache_page
+	.long	v7_dma_inv_range
+	.long	v7_dma_clean_range
+	.long	v7_dma_flush_range
+	.size	v7_cache_fns, . - v7_cache_fns
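The set/way loop in v7_flush_dcache_all above is the densest part of this file. The following C sketch restates the same arithmetic; read_clidr, read_csidr, write_cssr and dccisw are illustrative stand-ins for the mrc/mcr instructions, not kernel functions:

#include <stdint.h>

/* Hypothetical wrappers for the CP15 accesses the assembly performs. */
extern uint32_t read_clidr(void);	/* mrc p15, 1, <r>, c0, c0, 1 */
extern uint32_t read_csidr(void);	/* mrc p15, 1, <r>, c0, c0, 0 */
extern void write_cssr(uint32_t v);	/* mcr p15, 2, <r>, c0, c0, 0 */
extern void dccisw(uint32_t setway);	/* mcr p15, 0, <r>, c7, c14, 2 */

static void v7_flush_dcache_all_sketch(void)
{
	uint32_t clidr = read_clidr();
	uint32_t loc = (clidr >> 24) & 0x7;		/* level of coherency */

	for (uint32_t level = 0; level < loc; level++) {
		uint32_t ctype = (clidr >> (3 * level)) & 0x7;
		if (ctype < 2)				/* no cache, or I-cache only */
			continue;

		write_cssr(level << 1);			/* select this level's D/unified cache */
		uint32_t csidr = read_csidr();

		uint32_t line_shift = (csidr & 0x7) + 4;	/* log2(bytes per line) */
		uint32_t max_way  = (csidr >> 3) & 0x3ff;	/* ways - 1 */
		uint32_t max_set  = (csidr >> 13) & 0x7fff;	/* sets - 1 */
		/* the way index lives in the top bits; clz gives its shift
		 * (guarded for direct-mapped caches, where max_way is 0) */
		uint32_t way_shift = max_way ? __builtin_clz(max_way) : 0;

		for (uint32_t set = 0; set <= max_set; set++)
			for (uint32_t way = 0; way <= max_way; way++)
				dccisw((way << way_shift) |
				       (set << line_shift) |
				       (level << 1));
	}
	write_cssr(0);					/* back to cache level 0 */
}

Working outwards level by level (L1 first) is what lets the clean & invalidate stay architecturally atomic per line, as the v7_flush_kern_cache_all comment notes.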
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 9da43a0fdcdf..fc84fcc74380 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -14,7 +14,8 @@
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
 
-unsigned int cpu_last_asid = { 1 << ASID_BITS };
+static DEFINE_SPINLOCK(cpu_asid_lock);
+unsigned int cpu_last_asid = ASID_FIRST_VERSION;
 
 /*
  * We fork()ed a process, and we need a new context for the child
@@ -31,15 +32,16 @@ void __new_context(struct mm_struct *mm)
 {
 	unsigned int asid;
 
+	spin_lock(&cpu_asid_lock);
 	asid = ++cpu_last_asid;
 	if (asid == 0)
-		asid = cpu_last_asid = 1 << ASID_BITS;
+		asid = cpu_last_asid = ASID_FIRST_VERSION;
 
 	/*
 	 * If we've used up all our ASIDs, we need
 	 * to start a new version and flush the TLB.
 	 */
-	if ((asid & ~ASID_MASK) == 0) {
+	if (unlikely((asid & ~ASID_MASK) == 0)) {
 		asid = ++cpu_last_asid;
 		/* set the reserved ASID before flushing the TLB */
 		asm("mcr	p15, 0, %0, c13, c0, 1	@ set reserved context ID\n"
@@ -47,7 +49,16 @@ void __new_context(struct mm_struct *mm)
 	    : "r" (0));
 	isb();
 	flush_tlb_all();
+	if (icache_is_vivt_asid_tagged()) {
+		asm("mcr	p15, 0, %0, c7, c5, 0	@ invalidate I-cache\n"
+		    "mcr	p15, 0, %0, c7, c5, 6	@ flush BTAC/BTB\n"
+		    :
+		    : "r" (0));
+		dsb();
+	}
 	}
+	spin_unlock(&cpu_asid_lock);
 
+	mm->cpu_vm_mask = cpumask_of_cpu(smp_processor_id());
 	mm->context.id = asid;
 }
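The scheme being locked down here treats mm->context.id as a version number in the upper bits plus the hardware ASID in the lower bits. A small C model, assuming the usual ARMv7 definitions (the hardware ASID field in the context ID register is 8 bits wide; the real constants live in asm/mmu_context.h):

/* Illustrative constants; this sketch's assumption, not the patch. */
#define ASID_BITS		8
#define ASID_MASK		(~0u << ASID_BITS)	/* version part */
#define ASID_FIRST_VERSION	(1u << ASID_BITS)

static inline unsigned int hw_asid(unsigned int context_id)
{
	return context_id & ~ASID_MASK;	/* low 8 bits go into CP15 c13 */
}

/* Example: if cpu_last_asid is 0x1ff, the next allocation yields 0x200,
 * and (0x200 & ~ASID_MASK) == 0 signals rollover: live TLB entries may
 * still be tagged with ASIDs from the previous version, hence the
 * flush_tlb_all() above, plus the I-cache/BTB invalidation when the
 * I-cache is ASID-tagged VIVT.
 */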
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index 9e2c89eb2115..b13150052a76 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -59,3 +59,15 @@
 	.word	\ucset
 #endif
 	.endm
+
+/*
+ * dcache_line_size - get the cache line size from the CSIDR register
+ * (available on ARMv7+). It assumes that the CSSR register was configured
+ * to access the L1 data cache CSIDR.
+ */
+	.macro	dcache_line_size, reg, tmp
+	mrc	p15, 1, \tmp, c0, c0, 0		@ read CSIDR
+	and	\tmp, \tmp, #7			@ cache line size encoding
+	mov	\reg, #16			@ size offset
+	mov	\reg, \reg, lsl \tmp		@ actual cache line size
+	.endm
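In C terms, the mov/lsl pair computes the following; the low three bits of the selected CSIDR encode log2(words per line) - 2, so the size in bytes is 16 shifted left by the encoding:

#include <stdint.h>

/* Restatement of the dcache_line_size macro above. */
static uint32_t dcache_line_size_bytes(uint32_t csidr)
{
	return 16u << (csidr & 0x7);	/* e.g. encoding 1 -> 32-byte lines,
					   encoding 2 -> 64-byte lines */
}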
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
new file mode 100644
index 000000000000..dd823dd4a374
--- /dev/null
+++ b/arch/arm/mm/proc-v7.S
@@ -0,0 +1,262 @@
+/*
+ *  linux/arch/arm/mm/proc-v7.S
+ *
+ *  Copyright (C) 2001 Deep Blue Solutions Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  This is the "shell" of the ARMv7 processor support.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
+#include <asm/elf.h>
+#include <asm/pgtable-hwdef.h>
+#include <asm/pgtable.h>
+
+#include "proc-macros.S"
+
+#define TTB_C		(1 << 0)
+#define TTB_S		(1 << 1)
+#define TTB_RGN_OC_WT	(2 << 3)
+#define TTB_RGN_OC_WB	(3 << 3)
+
+ENTRY(cpu_v7_proc_init)
+	mov	pc, lr
+
+ENTRY(cpu_v7_proc_fin)
+	mov	pc, lr
+
+/*
+ *	cpu_v7_reset(loc)
+ *
+ *	Perform a soft reset of the system.  Put the CPU into the
+ *	same state as it would be if it had been reset, and branch
+ *	to what would be the reset vector.
+ *
+ *	- loc   - location to jump to for soft reset
+ *
+ *	It is assumed that:
+ */
+	.align	5
+ENTRY(cpu_v7_reset)
+	mov	pc, r0
+
+/*
+ *	cpu_v7_do_idle()
+ *
+ *	Idle the processor (eg, wait for interrupt).
+ *
+ *	IRQs are already disabled.
+ */
+ENTRY(cpu_v7_do_idle)
+	.long	0xe320f003			@ ARM V7 WFI instruction
+	mov	pc, lr
+
+ENTRY(cpu_v7_dcache_clean_area)
+#ifndef TLB_CAN_READ_FROM_L1_CACHE
+	dcache_line_size r2, r3
+1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
+	add	r0, r0, r2
+	subs	r1, r1, r2
+	bhi	1b
+	dsb
+#endif
+	mov	pc, lr
+
+/*
+ *	cpu_v7_switch_mm(pgd_phys, tsk)
+ *
+ *	Set the translation table base pointer to be pgd_phys
+ *
+ *	- pgd_phys - physical address of new TTB
+ *
+ *	It is assumed that:
+ *	- we are not using split page tables
+ */
+ENTRY(cpu_v7_switch_mm)
+	mov	r2, #0
+	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
+	orr	r0, r0, #TTB_RGN_OC_WB		@ mark PTWs outer cacheable, WB
+	mcr	p15, 0, r2, c13, c0, 1		@ set reserved context ID
+	isb
+1:	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
+	isb
+	mcr	p15, 0, r1, c13, c0, 1		@ set context ID
+	isb
+	mov	pc, lr
+
+/*
+ *	cpu_v7_set_pte_ext(ptep, pte)
+ *
+ *	Set a level 2 translation table entry.
+ *
+ *	- ptep  - pointer to level 2 translation table entry
+ *		  (hardware version is stored at -2048 bytes)
+ *	- pte   - PTE value to store
+ *	- ext	- value for extended PTE bits
+ *
+ *	Permissions:
+ *	  YUWD  APX AP1 AP0	SVC	User
+ *	  0xxx   0   0   0	no acc	no acc
+ *	  100x   1   0   1	r/o	no acc
+ *	  10x0   1   0   1	r/o	no acc
+ *	  1011   0   0   1	r/w	no acc
+ *	  110x   0   1   0	r/w	r/o
+ *	  11x0   0   1   0	r/w	r/o
+ *	  1111   0   1   1	r/w	r/w
+ */
+ENTRY(cpu_v7_set_pte_ext)
+	str	r1, [r0], #-2048		@ linux version
+
+	bic	r3, r1, #0x000003f0
+	bic	r3, r3, #0x00000003
+	orr	r3, r3, r2
+	orr	r3, r3, #PTE_EXT_AP0 | 2
+
+	tst	r1, #L_PTE_WRITE
+	tstne	r1, #L_PTE_DIRTY
+	orreq	r3, r3, #PTE_EXT_APX
+
+	tst	r1, #L_PTE_USER
+	orrne	r3, r3, #PTE_EXT_AP1
+	tstne	r3, #PTE_EXT_APX
+	bicne	r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
+
+	tst	r1, #L_PTE_YOUNG
+	biceq	r3, r3, #PTE_EXT_APX | PTE_EXT_AP_MASK
+
+	tst	r1, #L_PTE_EXEC
+	orreq	r3, r3, #PTE_EXT_XN
+
+	tst	r1, #L_PTE_PRESENT
+	moveq	r3, #0
+
+	str	r3, [r0]
+	mcr	p15, 0, r0, c7, c10, 1		@ flush_pte
+	mov	pc, lr
+
+cpu_v7_name:
+	.ascii	"ARMv7 Processor"
+	.align
+
+	.section ".text.init", #alloc, #execinstr
+
+/*
+ *	__v7_setup
+ *
+ *	Initialise TLB, Caches, and MMU state ready to switch the MMU
+ *	on.  Return in r0 the new CP15 C1 control register setting.
+ *
+ *	We automatically detect if we have a Harvard cache, and use the
+ *	Harvard cache control instructions instead of the unified cache
+ *	control instructions.
+ *
+ *	This should be able to cover all ARMv7 cores.
+ *
+ *	It is assumed that:
+ *	- cache type register is implemented
+ */
+__v7_setup:
+	adr	r12, __v7_setup_stack		@ the local stack
+	stmia	r12, {r0-r5, r7, r9, r11, lr}
+	bl	v7_flush_dcache_all
+	ldmia	r12, {r0-r5, r7, r9, r11, lr}
+	mov	r10, #0
+#ifdef HARVARD_CACHE
+	mcr	p15, 0, r10, c7, c5, 0		@ I+BTB cache invalidate
+#endif
+	dsb
+	mcr	p15, 0, r10, c8, c7, 0		@ invalidate I + D TLBs
+	mcr	p15, 0, r10, c2, c0, 2		@ TTB control register
+	orr	r4, r4, #TTB_RGN_OC_WB		@ mark PTWs outer cacheable, WB
+	mcr	p15, 0, r4, c2, c0, 0		@ load TTB0
+	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
+	mov	r10, #0x1f			@ domains 0, 1 = manager
+	mcr	p15, 0, r10, c3, c0, 0		@ load domain access register
+#ifndef CONFIG_CPU_L2CACHE_DISABLE
+	@ L2 cache configuration in the L2 aux control register
+	mrc	p15, 1, r10, c9, c0, 2
+	bic	r10, r10, #(1 << 16)		@ L2 outer cache
+	mcr	p15, 1, r10, c9, c0, 2
+	@ L2 cache is enabled in the aux control register
+	mrc	p15, 0, r10, c1, c0, 1
+	orr	r10, r10, #2
+	mcr	p15, 0, r10, c1, c0, 1
+#endif
+	mrc	p15, 0, r0, c1, c0, 0		@ read control register
+	ldr	r10, cr1_clear			@ get mask for bits to clear
+	bic	r0, r0, r10			@ clear them
+	ldr	r10, cr1_set			@ get mask for bits to set
+	orr	r0, r0, r10			@ set them
+	mov	pc, lr				@ return to head.S:__ret
+
+	/*
+	 *          V X F    I D LR
+	 * .... ...E PUI. .T.T 4RVI ZFRS BLDP WCAM
+	 * rrrr rrrx xxx0 0101 xxxx xxxx x111 xxxx < forced
+	 *          0 110       0011 1.00 .111 1101 < we want
+	 */
+	.type	cr1_clear, #object
+	.type	cr1_set, #object
+cr1_clear:
+	.word	0x0120c302
+cr1_set:
+	.word	0x00c0387d
+
+__v7_setup_stack:
+	.space	4 * 11				@ 11 registers
+
+	.type	v7_processor_functions, #object
+ENTRY(v7_processor_functions)
+	.word	v7_early_abort
+	.word	cpu_v7_proc_init
+	.word	cpu_v7_proc_fin
+	.word	cpu_v7_reset
+	.word	cpu_v7_do_idle
+	.word	cpu_v7_dcache_clean_area
+	.word	cpu_v7_switch_mm
+	.word	cpu_v7_set_pte_ext
+	.size	v7_processor_functions, . - v7_processor_functions
+
+	.type	cpu_arch_name, #object
+cpu_arch_name:
+	.asciz	"armv7"
+	.size	cpu_arch_name, . - cpu_arch_name
+
+	.type	cpu_elf_name, #object
+cpu_elf_name:
+	.asciz	"v7"
+	.size	cpu_elf_name, . - cpu_elf_name
+	.align
+
+	.section ".proc.info.init", #alloc, #execinstr
+
+	/*
+	 * Match any ARMv7 processor core.
+	 */
+	.type	__v7_proc_info, #object
+__v7_proc_info:
+	.long	0x000f0000		@ Required ID value
+	.long	0x000f0000		@ Mask for ID
+	.long	PMD_TYPE_SECT | \
+		PMD_SECT_BUFFERABLE | \
+		PMD_SECT_CACHEABLE | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ
+	.long	PMD_TYPE_SECT | \
+		PMD_SECT_XN | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ
+	b	__v7_setup
+	.long	cpu_arch_name
+	.long	cpu_elf_name
+	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
+	.long	cpu_v7_name
+	.long	v7_processor_functions
+	.long	v6wbi_tlb_fns
+	.long	v6_user_fns
+	.long	v7_cache_fns
+	.size	__v7_proc_info, . - __v7_proc_info
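The conditional orr/bic sequence in cpu_v7_set_pte_ext is easier to follow in C. The sketch below mirrors the assembly; the L_PTE_* and PTE_EXT_* bit positions are placeholder values so it compiles on its own, and the authoritative definitions live in asm/pgtable.h and asm/pgtable-hwdef.h:

#include <stdint.h>

/* Placeholder bit definitions; illustrative only, may differ from the
 * real headers. */
#define L_PTE_PRESENT	(1u << 0)
#define L_PTE_YOUNG	(1u << 1)
#define L_PTE_USER	(1u << 4)
#define L_PTE_WRITE	(1u << 5)
#define L_PTE_DIRTY	(1u << 6)
#define L_PTE_EXEC	(1u << 9)

#define PTE_EXT_XN	(1u << 0)
#define PTE_EXT_AP0	(1u << 4)
#define PTE_EXT_AP1	(2u << 4)
#define PTE_EXT_AP_MASK	(3u << 4)
#define PTE_EXT_APX	(1u << 9)

/* Mirrors the bit shuffling performed by cpu_v7_set_pte_ext; this is a
 * sketch of the translation, not the kernel implementation. */
static uint32_t linux_pte_to_hw(uint32_t pte, uint32_t ext)
{
	uint32_t hw = pte & ~0x3f3u;		/* bic #0x3f0, bic #0x3 */

	hw |= ext | PTE_EXT_AP0 | 2;		/* extended small page, AP0 set */

	if (!((pte & L_PTE_WRITE) && (pte & L_PTE_DIRTY)))
		hw |= PTE_EXT_APX;		/* read-only or clean: no write */

	if (pte & L_PTE_USER) {
		hw |= PTE_EXT_AP1;		/* grant user access */
		if (hw & PTE_EXT_APX)		/* user read-only case */
			hw &= ~(PTE_EXT_APX | PTE_EXT_AP0);
	}

	if (!(pte & L_PTE_YOUNG))		/* not young: no access at all */
		hw &= ~(PTE_EXT_APX | PTE_EXT_AP_MASK);

	if (!(pte & L_PTE_EXEC))
		hw |= PTE_EXT_XN;		/* execute-never */

	if (!(pte & L_PTE_PRESENT))
		hw = 0;				/* invalid descriptor */

	return hw;
}

Each row of the YUWD permission table above corresponds to one path through these conditionals; for example, a young/user/written/dirty page (1111) ends with AP1|AP0 set and APX clear, i.e. read/write for both SVC and User.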