author    Max Filippov <jcmvbkbc@gmail.com>  2012-12-03 06:01:43 -0500
committer Chris Zankel <chris@zankel.net>    2013-05-09 04:07:09 -0400
commit    e85e335f8ff615f74e29e09cc2599f095600114b
tree      8f09bbab5ca6a37f027fef17bf1de523ab574f10
parent    d83ff0bb828854d9e7172ac5d8d007a7466934c9
xtensa: add MMU v3 support
MMUv3 comes out of reset with an identity vaddr -> paddr mapping in TLB
way 6:

    Way 6 (512 MB)
        Vaddr       Paddr       ASID  Attr  RWX  Cache
        ----------  ----------  ----  ----  ---  -------
        0x00000000  0x00000000  0x01  0x03  RWX  Bypass
        0x20000000  0x20000000  0x01  0x03  RWX  Bypass
        0x40000000  0x40000000  0x01  0x03  RWX  Bypass
        0x60000000  0x60000000  0x01  0x03  RWX  Bypass
        0x80000000  0x80000000  0x01  0x03  RWX  Bypass
        0xa0000000  0xa0000000  0x01  0x03  RWX  Bypass
        0xc0000000  0xc0000000  0x01  0x03  RWX  Bypass
        0xe0000000  0xe0000000  0x01  0x03  RWX  Bypass

This patch adds remapping code at the reset vector or at the kernel
_start (depending on CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX) that
reconfigures MMUv3 as MMUv2:

    Way 5 (128 MB)
        Vaddr       Paddr       ASID  Attr  RWX  Cache
        ----------  ----------  ----  ----  ---  -------
        0xd0000000  0x00000000  0x01  0x07  RWX  WB
        0xd8000000  0x00000000  0x01  0x03  RWX  Bypass

    Way 6 (256 MB)
        Vaddr       Paddr       ASID  Attr  RWX  Cache
        ----------  ----------  ----  ----  ---  -------
        0xe0000000  0xf0000000  0x01  0x07  RWX  WB
        0xf0000000  0xf0000000  0x01  0x03  RWX  Bypass

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Chris Zankel <chris@zankel.net>
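The Step 4 writes in the patch below rely on the spanning-way TLB entry
format: witlb/wdtlb take an entry specifier (vaddr with the TLB way number
in the low bits) and an entry value (paddr plus cache-attribute bits). A
minimal C sketch of that arithmetic for the four final MMUv2 entries,
assuming the 0x03/0x07 attribute values shown in the Attr column above;
the helper names are illustrative, not kernel code:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative attribute encodings; the kernel builds these from
 * _PAGE_CA_* | _PAGE_HW_WRITE | _PAGE_HW_EXEC. The 0x03/0x07 values
 * match the Attr column in the tables above.
 */
#define CA_BYPASS	0x03u
#define CA_WRITEBACK	0x07u

/* Entry specifier: vaddr with the TLB way number in the low bits. */
static uint32_t tlb_entry(uint32_t vaddr, unsigned way)
{
	return vaddr | way;	/* e.g. 0xd0000000 | 5 -> 0xd0000005 */
}

/* Entry value: paddr with the cache-attribute bits. */
static uint32_t tlb_value(uint32_t paddr, uint32_t attr)
{
	return paddr | attr;	/* e.g. 0xf0000000 | 0x07 -> 0xf0000007 */
}

int main(void)
{
	/* The four MMUv2 entries the patch writes in Step 4. */
	struct { uint32_t vaddr, paddr, attr; unsigned way; } map[] = {
		{ 0xd0000000u, 0x00000000u, CA_WRITEBACK, 5 },
		{ 0xd8000000u, 0x00000000u, CA_BYPASS,    5 },
		{ 0xe0000000u, 0xf0000000u, CA_WRITEBACK, 6 },
		{ 0xf0000000u, 0xf0000000u, CA_BYPASS,    6 },
	};

	for (unsigned i = 0; i < 4; i++)
		printf("wdtlb/witlb [%08" PRIx32 "] = %08" PRIx32 "\n",
		       tlb_entry(map[i].vaddr, map[i].way),
		       tlb_value(map[i].paddr, map[i].attr));
	return 0;
}

Running this prints exactly the entry/value pairs that the movi/wdtlb/witlb
sequences in Step 4 of the patch load into ways 5 and 6.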
Diffstat (limited to 'arch/xtensa/include')
-rw-r--r--  arch/xtensa/include/asm/initialize_mmu.h  107
-rw-r--r--  arch/xtensa/include/asm/vectors.h          125
2 files changed, 232 insertions(+), 0 deletions(-)
diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h
index e1f8ba4061ed..722553f17db3 100644
--- a/arch/xtensa/include/asm/initialize_mmu.h
+++ b/arch/xtensa/include/asm/initialize_mmu.h
@@ -23,6 +23,9 @@
 #ifndef _XTENSA_INITIALIZE_MMU_H
 #define _XTENSA_INITIALIZE_MMU_H
 
+#include <asm/pgtable.h>
+#include <asm/vectors.h>
+
 #ifdef __ASSEMBLY__
 
 #define XTENSA_HWVERSION_RC_2009_0	230000
@@ -48,6 +51,110 @@
  * (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
  */
 
+#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+/*
+ * Have MMU v3
+ */
+
+#if !XCHAL_HAVE_VECBASE
+# error "MMU v3 requires reloc vectors"
+#endif
+
+	movi	a1, 0
+	_call0	1f	/* _call0 leaves our physical PC (label 1) in a0 */
+	_j	2f
+
+	.align	4
+1:	movi	a2, 0x10000000
+	movi	a3, 0x18000000
+	add	a2, a2, a0
+9:	bgeu	a2, a3, 9b	/* PC is out of the expected range */
+
+	/* Step 1: invalidate mapping at 0x40000000..0x5FFFFFFF. */
+
+	movi	a2, 0x40000006
+	idtlb	a2
+	iitlb	a2
+	isync
+
+	/* Step 2: map 0x40000000..0x47FFFFFF to paddr containing this code
+	 * and jump to the new mapping.
+	 */
+#define CA_BYPASS	(_PAGE_CA_BYPASS | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
+#define CA_WRITEBACK	(_PAGE_CA_WB | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
+
+	srli	a3, a0, 27
+	slli	a3, a3, 27
+	addi	a3, a3, CA_BYPASS
+	addi	a7, a2, -1
+	wdtlb	a3, a7
+	witlb	a3, a7
+	isync
+
+	slli	a4, a0, 5
+	srli	a4, a4, 5
+	addi	a5, a2, -6
+	add	a4, a4, a5
+	jx	a4
+
+	/* Step 3: unmap everything other than the current area.
+	 *	   Start at 0x60000000, wrap around, and end with 0x20000000.
+	 */
+2:	movi	a4, 0x20000000
+	add	a5, a2, a4
+3:	idtlb	a5
+	iitlb	a5
+	add	a5, a5, a4
+	bne	a5, a2, 3b
+
+	/* Step 4: Set up the MMU with the old V2 mappings. */
+	movi	a6, 0x01000000	/* select 256 MB pages for way 6 */
+	wsr	a6, ITLBCFG
+	wsr	a6, DTLBCFG
+	isync
+
+	movi	a5, 0xd0000005
+	movi	a4, CA_WRITEBACK
+	wdtlb	a4, a5
+	witlb	a4, a5
+
+	movi	a5, 0xd8000005
+	movi	a4, CA_BYPASS
+	wdtlb	a4, a5
+	witlb	a4, a5
+
+	movi	a5, 0xe0000006
+	movi	a4, 0xf0000000 + CA_WRITEBACK
+	wdtlb	a4, a5
+	witlb	a4, a5
+
+	movi	a5, 0xf0000006
+	movi	a4, 0xf0000000 + CA_BYPASS
+	wdtlb	a4, a5
+	witlb	a4, a5
+
+	isync
+
+	/* Jump to self, using MMU v2 mappings. */
+	movi	a4, 1f
+	jx	a4
+
+1:
+	movi	a2, VECBASE_RESET_VADDR
+	wsr	a2, vecbase
+
+	/* Step 5: remove the temporary mapping. */
+	idtlb	a7
+	iitlb	a7
+	isync
+
+	movi	a0, 0
+	wsr	a0, ptevaddr
+	rsync
+
+#endif /* defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU &&
+	  XCHAL_HAVE_SPANNING_WAY */
+
 	.endm
 
 #endif /*__ASSEMBLY__*/
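The Step 3 loop above relies on 32-bit wraparound: starting one 512 MB
region above the one holding the code, it invalidates way-6 entries until
the address wraps back to the current region, which is left intact. A
small C model of that traversal, assuming the 0x40000006 entry set up in
Step 2 (pure address arithmetic, no hardware access):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* a2 in the patch: the vaddr|way-6 entry for the 512 MB region
	 * that holds the currently executing code after Step 2.
	 */
	uint32_t current = 0x40000006u;
	uint32_t step    = 0x20000000u;	/* one way-6 region: 512 MB */
	uint32_t entry   = current + step;

	do {
		/* idtlb/iitlb in the patch: drop this identity mapping */
		printf("invalidate [%08" PRIx32 "]\n", entry);
		entry += step;	/* uint32_t wraps past 0xe0000000 */
	} while (entry != current);	/* stops after current - step */

	return 0;
}

This visits 0x60000006, 0x80000006, ..., 0xe0000006, 0x00000006 and
0x20000006, i.e. seven of the eight way-6 regions, matching the
"start at 0x60000000, wrap around, end with 0x20000000" comment.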
diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h
new file mode 100644
index 000000000000..c52b656d0310
--- /dev/null
+++ b/arch/xtensa/include/asm/vectors.h
@@ -0,0 +1,125 @@
+/*
+ * arch/xtensa/include/asm/vectors.h
+ *
+ * Xtensa macros for MMU V3 support. Deals with re-mapping the virtual
+ * memory addresses from "Virtual == Physical" to their previous V2 MMU
+ * mappings (KSEG at 0xD0000000 and KIO at 0xF0000000).
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 - 2012 Tensilica Inc.
+ *
+ * Pete Delaney <piet@tensilica.com>
+ * Marc Gauthier <marc@tensilica.com>
+ */
+
+#ifndef _XTENSA_VECTORS_H
+#define _XTENSA_VECTORS_H
+
+#include <variant/core.h>
+
+#if defined(CONFIG_MMU)
+
+/* Will become VECBASE */
+#define VIRTUAL_MEMORY_ADDRESS		0xD0000000
+
+/* Image Virtual Start Address */
+#define KERNELOFFSET			0xD0003000
+
+#if defined(XCHAL_HAVE_PTP_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+  /* MMU v3 - XCHAL_HAVE_PTP_MMU == 1 */
+  #define PHYSICAL_MEMORY_ADDRESS	0x00000000
+  #define LOAD_MEMORY_ADDRESS		0x00003000
+#else
+  /* MMU V2 - XCHAL_HAVE_PTP_MMU == 0 */
+  #define PHYSICAL_MEMORY_ADDRESS	0xD0000000
+  #define LOAD_MEMORY_ADDRESS		0xD0003000
+#endif
+
+#else /* !defined(CONFIG_MMU) */
+  /* MMU not being used - Virtual == Physical */
+
+  /* VECBASE */
+  #define VIRTUAL_MEMORY_ADDRESS	0x00002000
+
+  /* Location of the start of the kernel text, _start */
+  #define KERNELOFFSET			0x00003000
+  #define PHYSICAL_MEMORY_ADDRESS	0x00000000
+
+  /* Loaded just above possibly live vectors */
+  #define LOAD_MEMORY_ADDRESS		0x00003000
+
+#endif /* CONFIG_MMU */
+
+#define XC_VADDR(offset)		(VIRTUAL_MEMORY_ADDRESS + offset)
+#define XC_PADDR(offset)		(PHYSICAL_MEMORY_ADDRESS + offset)
+
+/* Used to set VECBASE register */
+#define VECBASE_RESET_VADDR		VIRTUAL_MEMORY_ADDRESS
+
+#define RESET_VECTOR_VECOFS		(XCHAL_RESET_VECTOR_VADDR - \
+					 VECBASE_RESET_VADDR)
+#define RESET_VECTOR_VADDR		XC_VADDR(RESET_VECTOR_VECOFS)
+
+#define RESET_VECTOR1_VECOFS		(XCHAL_RESET_VECTOR1_VADDR - \
+					 VECBASE_RESET_VADDR)
+#define RESET_VECTOR1_VADDR		XC_VADDR(RESET_VECTOR1_VECOFS)
+
+#if XCHAL_HAVE_VECBASE
+
+#define USER_VECTOR_VADDR		XC_VADDR(XCHAL_USER_VECOFS)
+#define KERNEL_VECTOR_VADDR		XC_VADDR(XCHAL_KERNEL_VECOFS)
+#define DOUBLEEXC_VECTOR_VADDR		XC_VADDR(XCHAL_DOUBLEEXC_VECOFS)
+#define WINDOW_VECTORS_VADDR		XC_VADDR(XCHAL_WINDOW_OF4_VECOFS)
+#define INTLEVEL2_VECTOR_VADDR		XC_VADDR(XCHAL_INTLEVEL2_VECOFS)
+#define INTLEVEL3_VECTOR_VADDR		XC_VADDR(XCHAL_INTLEVEL3_VECOFS)
+#define INTLEVEL4_VECTOR_VADDR		XC_VADDR(XCHAL_INTLEVEL4_VECOFS)
+#define INTLEVEL5_VECTOR_VADDR		XC_VADDR(XCHAL_INTLEVEL5_VECOFS)
+#define INTLEVEL6_VECTOR_VADDR		XC_VADDR(XCHAL_INTLEVEL6_VECOFS)
+
+#define DEBUG_VECTOR_VADDR		XC_VADDR(XCHAL_DEBUG_VECOFS)
+
+#undef XCHAL_NMI_VECTOR_VADDR
+#define XCHAL_NMI_VECTOR_VADDR		XC_VADDR(XCHAL_NMI_VECOFS)
+
+#undef XCHAL_INTLEVEL7_VECTOR_VADDR
+#define XCHAL_INTLEVEL7_VECTOR_VADDR	XC_VADDR(XCHAL_INTLEVEL7_VECOFS)
+
+/*
+ * These XCHAL_* #defines from variant/core.h
+ * are not valid to use with the V3 MMU. The non-XCHAL
+ * constants defined above should be used instead.
+ */
+#undef XCHAL_VECBASE_RESET_VADDR
+#undef XCHAL_RESET_VECTOR0_VADDR
+#undef XCHAL_USER_VECTOR_VADDR
+#undef XCHAL_KERNEL_VECTOR_VADDR
+#undef XCHAL_DOUBLEEXC_VECTOR_VADDR
+#undef XCHAL_WINDOW_VECTORS_VADDR
+#undef XCHAL_INTLEVEL2_VECTOR_VADDR
+#undef XCHAL_INTLEVEL3_VECTOR_VADDR
+#undef XCHAL_INTLEVEL4_VECTOR_VADDR
+#undef XCHAL_INTLEVEL5_VECTOR_VADDR
+#undef XCHAL_INTLEVEL6_VECTOR_VADDR
+#undef XCHAL_DEBUG_VECTOR_VADDR
+#undef XCHAL_NMI_VECTOR_VADDR
+#undef XCHAL_INTLEVEL7_VECTOR_VADDR
+
+#else
+
+#define USER_VECTOR_VADDR		XCHAL_USER_VECTOR_VADDR
+#define KERNEL_VECTOR_VADDR		XCHAL_KERNEL_VECTOR_VADDR
+#define DOUBLEEXC_VECTOR_VADDR		XCHAL_DOUBLEEXC_VECTOR_VADDR
+#define WINDOW_VECTORS_VADDR		XCHAL_WINDOW_VECTORS_VADDR
+#define INTLEVEL2_VECTOR_VADDR		XCHAL_INTLEVEL2_VECTOR_VADDR
+#define INTLEVEL3_VECTOR_VADDR		XCHAL_INTLEVEL3_VECTOR_VADDR
+#define INTLEVEL4_VECTOR_VADDR		XCHAL_INTLEVEL4_VECTOR_VADDR
+#define INTLEVEL5_VECTOR_VADDR		XCHAL_INTLEVEL5_VECTOR_VADDR
+#define INTLEVEL6_VECTOR_VADDR		XCHAL_INTLEVEL6_VECTOR_VADDR
+#define DEBUG_VECTOR_VADDR		XCHAL_DEBUG_VECTOR_VADDR
+
+#endif
+
+#endif /* _XTENSA_VECTORS_H */
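With VECBASE relocated, each vector's virtual address is simply
VECBASE_RESET_VADDR plus the per-core XCHAL_*_VECOFS offset from
variant/core.h. A short sketch of that arithmetic with made-up offsets
(the real offsets are configuration-specific and come from the core's
variant/core.h):

#include <stdio.h>

#define VIRTUAL_MEMORY_ADDRESS	0xD0000000u
#define XC_VADDR(offset)	(VIRTUAL_MEMORY_ADDRESS + (offset))

/* Hypothetical per-core offsets, for illustration only. */
#define XCHAL_USER_VECOFS	0x00000340u
#define XCHAL_KERNEL_VECOFS	0x00000300u

int main(void)
{
	printf("USER_VECTOR_VADDR   = 0x%08x\n", XC_VADDR(XCHAL_USER_VECOFS));
	printf("KERNEL_VECTOR_VADDR = 0x%08x\n", XC_VADDR(XCHAL_KERNEL_VECOFS));
	return 0;
}

Because the offsets stay fixed while VIRTUAL_MEMORY_ADDRESS differs
between the MMU and no-MMU configurations, the same XC_VADDR() expression
yields correct vector addresses in both cases.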