about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorRussell King <rmk+kernel@arm.linux.org.uk>2015-04-04 15:09:46 -0400
committerRussell King <rmk+kernel@arm.linux.org.uk>2015-06-01 18:48:19 -0400
commitb2c3e38a54714e917c9e8675ff5812dca1c0f39d (patch)
tree0d5e9747b2c73ccd4c961c8d6a50841b52cf11fd
parent1221ed10f2a56ecdd8ff75f436f52aca5ba0f1d3 (diff)
ARM: redo TTBR setup code for LPAE
Re-engineer the LPAE TTBR setup code. Rather than passing some shifted address in order to fit in a CPU register, pass either a full physical address (in the case of r4, r5 for TTBR0) or a PFN (for TTBR1). This removes the ARCH_PGD_SHIFT hack, and the last dangerous user of cpu_set_ttbr() in the secondary CPU startup code path (which was there to re-set TTBR1 to the appropriate high physical address space on Keystone2.)

Tested-by: Murali Karicheri <m-karicheri2@ti.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-rw-r--r--arch/arm/include/asm/memory.h16
-rw-r--r--arch/arm/include/asm/proc-fns.h7
-rw-r--r--arch/arm/include/asm/smp.h2
-rw-r--r--arch/arm/kernel/head-nommu.S2
-rw-r--r--arch/arm/kernel/head.S42
-rw-r--r--arch/arm/kernel/smp.c10
-rw-r--r--arch/arm/mach-keystone/platsmp.c13
-rw-r--r--arch/arm/mm/proc-v7-2level.S6
-rw-r--r--arch/arm/mm/proc-v7-3level.S14
-rw-r--r--arch/arm/mm/proc-v7.S26
10 files changed, 60 insertions, 78 deletions
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 184def0e1652..3a72d69b3255 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -18,8 +18,6 @@
18#include <linux/types.h> 18#include <linux/types.h>
19#include <linux/sizes.h> 19#include <linux/sizes.h>
20 20
21#include <asm/cache.h>
22
23#ifdef CONFIG_NEED_MACH_MEMORY_H 21#ifdef CONFIG_NEED_MACH_MEMORY_H
24#include <mach/memory.h> 22#include <mach/memory.h>
25#endif 23#endif
@@ -133,20 +131,6 @@
133#define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys))) 131#define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys)))
134 132
135/* 133/*
136 * Minimum guaranted alignment in pgd_alloc(). The page table pointers passed
137 * around in head.S and proc-*.S are shifted by this amount, in order to
138 * leave spare high bits for systems with physical address extension. This
139 * does not fully accomodate the 40-bit addressing capability of ARM LPAE, but
140 * gives us about 38-bits or so.
141 */
142#ifdef CONFIG_ARM_LPAE
143#define ARCH_PGD_SHIFT L1_CACHE_SHIFT
144#else
145#define ARCH_PGD_SHIFT 0
146#endif
147#define ARCH_PGD_MASK ((1 << ARCH_PGD_SHIFT) - 1)
148
149/*
150 * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical 134 * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
151 * memory. This is used for XIP and NoMMU kernels, and on platforms that don't 135 * memory. This is used for XIP and NoMMU kernels, and on platforms that don't
152 * have CONFIG_ARM_PATCH_PHYS_VIRT. Assembly code must always use 136 * have CONFIG_ARM_PATCH_PHYS_VIRT. Assembly code must always use
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
index 5324c1112f3a..8877ad5ffe10 100644
--- a/arch/arm/include/asm/proc-fns.h
+++ b/arch/arm/include/asm/proc-fns.h
@@ -125,13 +125,6 @@ extern void cpu_resume(void);
125 ttbr; \ 125 ttbr; \
126 }) 126 })
127 127
128#define cpu_set_ttbr(nr, val) \
129 do { \
130 u64 ttbr = val; \
131 __asm__("mcrr p15, " #nr ", %Q0, %R0, c2" \
132 : : "r" (ttbr)); \
133 } while (0)
134
135#define cpu_get_pgd() \ 128#define cpu_get_pgd() \
136 ({ \ 129 ({ \
137 u64 pg = cpu_get_ttbr(0); \ 130 u64 pg = cpu_get_ttbr(0); \
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index 18f5a554134f..487aa08f31ee 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -61,7 +61,7 @@ asmlinkage void secondary_start_kernel(void);
61struct secondary_data { 61struct secondary_data {
62 union { 62 union {
63 unsigned long mpu_rgn_szr; 63 unsigned long mpu_rgn_szr;
64 unsigned long pgdir; 64 u64 pgdir;
65 }; 65 };
66 unsigned long swapper_pg_dir; 66 unsigned long swapper_pg_dir;
67 void *stack; 67 void *stack;
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index aebfbf79a1a3..84da14b7cd04 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -123,7 +123,7 @@ ENTRY(secondary_startup)
123ENDPROC(secondary_startup) 123ENDPROC(secondary_startup)
124 124
125ENTRY(__secondary_switched) 125ENTRY(__secondary_switched)
126 ldr sp, [r7, #8] @ set up the stack pointer 126 ldr sp, [r7, #12] @ set up the stack pointer
127 mov fp, #0 127 mov fp, #0
128 b secondary_start_kernel 128 b secondary_start_kernel
129ENDPROC(__secondary_switched) 129ENDPROC(__secondary_switched)
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 3637973a9708..7304b4c44b52 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -131,13 +131,30 @@ ENTRY(stext)
131 * The following calls CPU specific code in a position independent 131 * The following calls CPU specific code in a position independent
132 * manner. See arch/arm/mm/proc-*.S for details. r10 = base of 132 * manner. See arch/arm/mm/proc-*.S for details. r10 = base of
133 * xxx_proc_info structure selected by __lookup_processor_type 133 * xxx_proc_info structure selected by __lookup_processor_type
134 * above. On return, the CPU will be ready for the MMU to be 134 * above.
135 * turned on, and r0 will hold the CPU control register value. 135 *
136 * The processor init function will be called with:
137 * r1 - machine type
138 * r2 - boot data (atags/dt) pointer
139 * r4 - translation table base (low word)
140 * r5 - translation table base (high word, if LPAE)
141 * r8 - translation table base 1 (pfn if LPAE)
142 * r9 - cpuid
143 * r13 - virtual address for __enable_mmu -> __turn_mmu_on
144 *
145 * On return, the CPU will be ready for the MMU to be turned on,
146 * r0 will hold the CPU control register value, r1, r2, r4, and
147 * r9 will be preserved. r5 will also be preserved if LPAE.
136 */ 148 */
137 ldr r13, =__mmap_switched @ address to jump to after 149 ldr r13, =__mmap_switched @ address to jump to after
138 @ mmu has been enabled 150 @ mmu has been enabled
139 adr lr, BSYM(1f) @ return (PIC) address 151 adr lr, BSYM(1f) @ return (PIC) address
152#ifdef CONFIG_ARM_LPAE
153 mov r5, #0 @ high TTBR0
154 mov r8, r4, lsr #12 @ TTBR1 is swapper_pg_dir pfn
155#else
140 mov r8, r4 @ set TTBR1 to swapper_pg_dir 156 mov r8, r4 @ set TTBR1 to swapper_pg_dir
157#endif
141 ldr r12, [r10, #PROCINFO_INITFUNC] 158 ldr r12, [r10, #PROCINFO_INITFUNC]
142 add r12, r12, r10 159 add r12, r12, r10
143 ret r12 160 ret r12
@@ -158,7 +175,7 @@ ENDPROC(stext)
158 * 175 *
159 * Returns: 176 * Returns:
160 * r0, r3, r5-r7 corrupted 177 * r0, r3, r5-r7 corrupted
161 * r4 = page table (see ARCH_PGD_SHIFT in asm/memory.h) 178 * r4 = physical page table address
162 */ 179 */
163__create_page_tables: 180__create_page_tables:
164 pgtbl r4, r8 @ page table address 181 pgtbl r4, r8 @ page table address
@@ -333,7 +350,6 @@ __create_page_tables:
333#endif 350#endif
334#ifdef CONFIG_ARM_LPAE 351#ifdef CONFIG_ARM_LPAE
335 sub r4, r4, #0x1000 @ point to the PGD table 352 sub r4, r4, #0x1000 @ point to the PGD table
336 mov r4, r4, lsr #ARCH_PGD_SHIFT
337#endif 353#endif
338 ret lr 354 ret lr
339ENDPROC(__create_page_tables) 355ENDPROC(__create_page_tables)
@@ -381,9 +397,9 @@ ENTRY(secondary_startup)
381 adr r4, __secondary_data 397 adr r4, __secondary_data
382 ldmia r4, {r5, r7, r12} @ address to jump to after 398 ldmia r4, {r5, r7, r12} @ address to jump to after
383 sub lr, r4, r5 @ mmu has been enabled 399 sub lr, r4, r5 @ mmu has been enabled
384 ldr r4, [r7, lr] @ get secondary_data.pgdir 400 add r3, r7, lr
385 add r7, r7, #4 401 ldrd r4, [r3, #0] @ get secondary_data.pgdir
386 ldr r8, [r7, lr] @ get secondary_data.swapper_pg_dir 402 ldr r8, [r3, #8] @ get secondary_data.swapper_pg_dir
387 adr lr, BSYM(__enable_mmu) @ return address 403 adr lr, BSYM(__enable_mmu) @ return address
388 mov r13, r12 @ __secondary_switched address 404 mov r13, r12 @ __secondary_switched address
389 ldr r12, [r10, #PROCINFO_INITFUNC] 405 ldr r12, [r10, #PROCINFO_INITFUNC]
@@ -397,7 +413,7 @@ ENDPROC(secondary_startup_arm)
397 * r6 = &secondary_data 413 * r6 = &secondary_data
398 */ 414 */
399ENTRY(__secondary_switched) 415ENTRY(__secondary_switched)
400 ldr sp, [r7, #4] @ get secondary_data.stack 416 ldr sp, [r7, #12] @ get secondary_data.stack
401 mov fp, #0 417 mov fp, #0
402 b secondary_start_kernel 418 b secondary_start_kernel
403ENDPROC(__secondary_switched) 419ENDPROC(__secondary_switched)
@@ -416,12 +432,14 @@ __secondary_data:
416/* 432/*
417 * Setup common bits before finally enabling the MMU. Essentially 433 * Setup common bits before finally enabling the MMU. Essentially
418 * this is just loading the page table pointer and domain access 434 * this is just loading the page table pointer and domain access
419 * registers. 435 * registers. All these registers need to be preserved by the
436 * processor setup function (or set in the case of r0)
420 * 437 *
421 * r0 = cp#15 control register 438 * r0 = cp#15 control register
422 * r1 = machine ID 439 * r1 = machine ID
423 * r2 = atags or dtb pointer 440 * r2 = atags or dtb pointer
424 * r4 = page table (see ARCH_PGD_SHIFT in asm/memory.h) 441 * r4 = TTBR pointer (low word)
442 * r5 = TTBR pointer (high word if LPAE)
425 * r9 = processor ID 443 * r9 = processor ID
426 * r13 = *virtual* address to jump to upon completion 444 * r13 = *virtual* address to jump to upon completion
427 */ 445 */
@@ -440,7 +458,9 @@ __enable_mmu:
440#ifdef CONFIG_CPU_ICACHE_DISABLE 458#ifdef CONFIG_CPU_ICACHE_DISABLE
441 bic r0, r0, #CR_I 459 bic r0, r0, #CR_I
442#endif 460#endif
443#ifndef CONFIG_ARM_LPAE 461#ifdef CONFIG_ARM_LPAE
462 mcrr p15, 0, r4, r5, c2 @ load TTBR0
463#else
444 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ 464 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
445 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ 465 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
446 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \ 466 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index cca5b8758185..90dfbedfbfb8 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -86,9 +86,11 @@ void __init smp_set_ops(struct smp_operations *ops)
86 86
87static unsigned long get_arch_pgd(pgd_t *pgd) 87static unsigned long get_arch_pgd(pgd_t *pgd)
88{ 88{
89 phys_addr_t pgdir = virt_to_idmap(pgd); 89#ifdef CONFIG_ARM_LPAE
90 BUG_ON(pgdir & ARCH_PGD_MASK); 90 return __phys_to_pfn(virt_to_phys(pgd));
91 return pgdir >> ARCH_PGD_SHIFT; 91#else
92 return virt_to_phys(pgd);
93#endif
92} 94}
93 95
94int __cpu_up(unsigned int cpu, struct task_struct *idle) 96int __cpu_up(unsigned int cpu, struct task_struct *idle)
@@ -108,7 +110,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
108#endif 110#endif
109 111
110#ifdef CONFIG_MMU 112#ifdef CONFIG_MMU
111 secondary_data.pgdir = get_arch_pgd(idmap_pgd); 113 secondary_data.pgdir = virt_to_phys(idmap_pgd);
112 secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir); 114 secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
113#endif 115#endif
114 sync_cache_w(&secondary_data); 116 sync_cache_w(&secondary_data);
diff --git a/arch/arm/mach-keystone/platsmp.c b/arch/arm/mach-keystone/platsmp.c
index 5f46a7cf907b..4bbb18463bfd 100644
--- a/arch/arm/mach-keystone/platsmp.c
+++ b/arch/arm/mach-keystone/platsmp.c
@@ -39,19 +39,6 @@ static int keystone_smp_boot_secondary(unsigned int cpu,
39 return error; 39 return error;
40} 40}
41 41
42#ifdef CONFIG_ARM_LPAE
43static void __cpuinit keystone_smp_secondary_initmem(unsigned int cpu)
44{
45 pgd_t *pgd0 = pgd_offset_k(0);
46 cpu_set_ttbr(1, __pa(pgd0) + TTBR1_OFFSET);
47 local_flush_tlb_all();
48}
49#else
50static inline void __cpuinit keystone_smp_secondary_initmem(unsigned int cpu)
51{}
52#endif
53
54struct smp_operations keystone_smp_ops __initdata = { 42struct smp_operations keystone_smp_ops __initdata = {
55 .smp_boot_secondary = keystone_smp_boot_secondary, 43 .smp_boot_secondary = keystone_smp_boot_secondary,
56 .smp_secondary_init = keystone_smp_secondary_initmem,
57}; 44};
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index 10405b8d31af..fa385140715f 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -148,10 +148,10 @@ ENDPROC(cpu_v7_set_pte_ext)
148 * Macro for setting up the TTBRx and TTBCR registers. 148 * Macro for setting up the TTBRx and TTBCR registers.
149 * - \ttb0 and \ttb1 updated with the corresponding flags. 149 * - \ttb0 and \ttb1 updated with the corresponding flags.
150 */ 150 */
151 .macro v7_ttb_setup, zero, ttbr0, ttbr1, tmp 151 .macro v7_ttb_setup, zero, ttbr0l, ttbr0h, ttbr1, tmp
152 mcr p15, 0, \zero, c2, c0, 2 @ TTB control register 152 mcr p15, 0, \zero, c2, c0, 2 @ TTB control register
153 ALT_SMP(orr \ttbr0, \ttbr0, #TTB_FLAGS_SMP) 153 ALT_SMP(orr \ttbr0l, \ttbr0l, #TTB_FLAGS_SMP)
154 ALT_UP(orr \ttbr0, \ttbr0, #TTB_FLAGS_UP) 154 ALT_UP(orr \ttbr0l, \ttbr0l, #TTB_FLAGS_UP)
155 ALT_SMP(orr \ttbr1, \ttbr1, #TTB_FLAGS_SMP) 155 ALT_SMP(orr \ttbr1, \ttbr1, #TTB_FLAGS_SMP)
156 ALT_UP(orr \ttbr1, \ttbr1, #TTB_FLAGS_UP) 156 ALT_UP(orr \ttbr1, \ttbr1, #TTB_FLAGS_UP)
157 mcr p15, 0, \ttbr1, c2, c0, 1 @ load TTB1 157 mcr p15, 0, \ttbr1, c2, c0, 1 @ load TTB1
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index d3daed0ae0ad..5e5720e8bc5f 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -126,11 +126,10 @@ ENDPROC(cpu_v7_set_pte_ext)
126 * Macro for setting up the TTBRx and TTBCR registers. 126 * Macro for setting up the TTBRx and TTBCR registers.
127 * - \ttbr1 updated. 127 * - \ttbr1 updated.
128 */ 128 */
129 .macro v7_ttb_setup, zero, ttbr0, ttbr1, tmp 129 .macro v7_ttb_setup, zero, ttbr0l, ttbr0h, ttbr1, tmp
130 ldr \tmp, =swapper_pg_dir @ swapper_pg_dir virtual address 130 ldr \tmp, =swapper_pg_dir @ swapper_pg_dir virtual address
131 mov \tmp, \tmp, lsr #ARCH_PGD_SHIFT 131 cmp \ttbr1, \tmp, lsr #12 @ PHYS_OFFSET > PAGE_OFFSET?
132 cmp \ttbr1, \tmp @ PHYS_OFFSET > PAGE_OFFSET? 132 mrc p15, 0, \tmp, c2, c0, 2 @ TTB control egister
133 mrc p15, 0, \tmp, c2, c0, 2 @ TTB control register
134 orr \tmp, \tmp, #TTB_EAE 133 orr \tmp, \tmp, #TTB_EAE
135 ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP) 134 ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP)
136 ALT_UP(orr \tmp, \tmp, #TTB_FLAGS_UP) 135 ALT_UP(orr \tmp, \tmp, #TTB_FLAGS_UP)
@@ -143,13 +142,10 @@ ENDPROC(cpu_v7_set_pte_ext)
143 */ 142 */
144 orrls \tmp, \tmp, #TTBR1_SIZE @ TTBCR.T1SZ 143 orrls \tmp, \tmp, #TTBR1_SIZE @ TTBCR.T1SZ
145 mcr p15, 0, \tmp, c2, c0, 2 @ TTBCR 144 mcr p15, 0, \tmp, c2, c0, 2 @ TTBCR
146 mov \tmp, \ttbr1, lsr #(32 - ARCH_PGD_SHIFT) @ upper bits 145 mov \tmp, \ttbr1, lsr #20
147 mov \ttbr1, \ttbr1, lsl #ARCH_PGD_SHIFT @ lower bits 146 mov \ttbr1, \ttbr1, lsl #12
148 addls \ttbr1, \ttbr1, #TTBR1_OFFSET 147 addls \ttbr1, \ttbr1, #TTBR1_OFFSET
149 mcrr p15, 1, \ttbr1, \tmp, c2 @ load TTBR1 148 mcrr p15, 1, \ttbr1, \tmp, c2 @ load TTBR1
150 mov \tmp, \ttbr0, lsr #(32 - ARCH_PGD_SHIFT) @ upper bits
151 mov \ttbr0, \ttbr0, lsl #ARCH_PGD_SHIFT @ lower bits
152 mcrr p15, 0, \ttbr0, \tmp, c2 @ load TTBR0
153 .endm 149 .endm
154 150
155 /* 151 /*
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 3d1054f11a8a..873230912894 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -343,9 +343,9 @@ __v7_setup:
343 and r10, r0, #0xff000000 @ ARM? 343 and r10, r0, #0xff000000 @ ARM?
344 teq r10, #0x41000000 344 teq r10, #0x41000000
345 bne 3f 345 bne 3f
346 and r5, r0, #0x00f00000 @ variant 346 and r3, r0, #0x00f00000 @ variant
347 and r6, r0, #0x0000000f @ revision 347 and r6, r0, #0x0000000f @ revision
348 orr r6, r6, r5, lsr #20-4 @ combine variant and revision 348 orr r6, r6, r3, lsr #20-4 @ combine variant and revision
349 ubfx r0, r0, #4, #12 @ primary part number 349 ubfx r0, r0, #4, #12 @ primary part number
350 350
351 /* Cortex-A8 Errata */ 351 /* Cortex-A8 Errata */
@@ -354,7 +354,7 @@ __v7_setup:
354 bne 2f 354 bne 2f
355#if defined(CONFIG_ARM_ERRATA_430973) && !defined(CONFIG_ARCH_MULTIPLATFORM) 355#if defined(CONFIG_ARM_ERRATA_430973) && !defined(CONFIG_ARCH_MULTIPLATFORM)
356 356
357 teq r5, #0x00100000 @ only present in r1p* 357 teq r3, #0x00100000 @ only present in r1p*
358 mrceq p15, 0, r10, c1, c0, 1 @ read aux control register 358 mrceq p15, 0, r10, c1, c0, 1 @ read aux control register
359 orreq r10, r10, #(1 << 6) @ set IBE to 1 359 orreq r10, r10, #(1 << 6) @ set IBE to 1
360 mcreq p15, 0, r10, c1, c0, 1 @ write aux control register 360 mcreq p15, 0, r10, c1, c0, 1 @ write aux control register
@@ -395,7 +395,7 @@ __v7_setup:
395 mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register 395 mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
396#endif 396#endif
397#ifdef CONFIG_ARM_ERRATA_743622 397#ifdef CONFIG_ARM_ERRATA_743622
398 teq r5, #0x00200000 @ only present in r2p* 398 teq r3, #0x00200000 @ only present in r2p*
399 mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register 399 mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register
400 orreq r10, r10, #1 << 6 @ set bit #6 400 orreq r10, r10, #1 << 6 @ set bit #6
401 mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register 401 mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
@@ -425,10 +425,10 @@ __v7_setup:
425 mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate 425 mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate
426#ifdef CONFIG_MMU 426#ifdef CONFIG_MMU
427 mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs 427 mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs
428 v7_ttb_setup r10, r4, r8, r5 @ TTBCR, TTBRx setup 428 v7_ttb_setup r10, r4, r5, r8, r3 @ TTBCR, TTBRx setup
429 ldr r5, =PRRR @ PRRR 429 ldr r3, =PRRR @ PRRR
430 ldr r6, =NMRR @ NMRR 430 ldr r6, =NMRR @ NMRR
431 mcr p15, 0, r5, c10, c2, 0 @ write PRRR 431 mcr p15, 0, r3, c10, c2, 0 @ write PRRR
432 mcr p15, 0, r6, c10, c2, 1 @ write NMRR 432 mcr p15, 0, r6, c10, c2, 1 @ write NMRR
433#endif 433#endif
434 dsb @ Complete invalidations 434 dsb @ Complete invalidations
@@ -437,22 +437,22 @@ __v7_setup:
437 and r0, r0, #(0xf << 12) @ ThumbEE enabled field 437 and r0, r0, #(0xf << 12) @ ThumbEE enabled field
438 teq r0, #(1 << 12) @ check if ThumbEE is present 438 teq r0, #(1 << 12) @ check if ThumbEE is present
439 bne 1f 439 bne 1f
440 mov r5, #0 440 mov r3, #0
441 mcr p14, 6, r5, c1, c0, 0 @ Initialize TEEHBR to 0 441 mcr p14, 6, r3, c1, c0, 0 @ Initialize TEEHBR to 0
442 mrc p14, 6, r0, c0, c0, 0 @ load TEECR 442 mrc p14, 6, r0, c0, c0, 0 @ load TEECR
443 orr r0, r0, #1 @ set the 1st bit in order to 443 orr r0, r0, #1 @ set the 1st bit in order to
444 mcr p14, 6, r0, c0, c0, 0 @ stop userspace TEEHBR access 444 mcr p14, 6, r0, c0, c0, 0 @ stop userspace TEEHBR access
4451: 4451:
446#endif 446#endif
447 adr r5, v7_crval 447 adr r3, v7_crval
448 ldmia r5, {r5, r6} 448 ldmia r3, {r3, r6}
449 ARM_BE8(orr r6, r6, #1 << 25) @ big-endian page tables 449 ARM_BE8(orr r6, r6, #1 << 25) @ big-endian page tables
450#ifdef CONFIG_SWP_EMULATE 450#ifdef CONFIG_SWP_EMULATE
451 orr r5, r5, #(1 << 10) @ set SW bit in "clear" 451 orr r3, r3, #(1 << 10) @ set SW bit in "clear"
452 bic r6, r6, #(1 << 10) @ clear it in "mmuset" 452 bic r6, r6, #(1 << 10) @ clear it in "mmuset"
453#endif 453#endif
454 mrc p15, 0, r0, c1, c0, 0 @ read control register 454 mrc p15, 0, r0, c1, c0, 0 @ read control register
455 bic r0, r0, r5 @ clear bits them 455 bic r0, r0, r3 @ clear bits them
456 orr r0, r0, r6 @ set them 456 orr r0, r0, r6 @ set them
457 THUMB( orr r0, r0, #1 << 30 ) @ Thumb exceptions 457 THUMB( orr r0, r0, #1 << 30 ) @ Thumb exceptions
458 ret lr @ return to head.S:__ret 458 ret lr @ return to head.S:__ret