Diffstat (limited to 'arch')
-rw-r--r--	arch/sh/kernel/head_32.S	| 169
1 file changed, 92 insertions(+), 77 deletions(-)
diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S
index ee954d1b8cc6..e93320135cda 100644
--- a/arch/sh/kernel/head_32.S
+++ b/arch/sh/kernel/head_32.S
@@ -86,7 +86,96 @@ ENTRY(_stext)
 #endif
 
 #if defined(CONFIG_PMB) && !defined(CONFIG_PMB_LEGACY)
+/*
+ * Reconfigure the initial PMB mappings set up by the hardware.
+ *
+ * When we boot in 32-bit MMU mode there are 2 PMB entries already
+ * set up for us.
+ *
+ * Entry  VPN         PPN         V   SZ      C   UB  WT
+ * ---------------------------------------------------------------
+ *   0    0x80000000  0x00000000  1   512MB   1   0   1
+ *   1    0xA0000000  0x00000000  1   512MB   0   0   0
+ *
+ * But we reprogram them here because we want complete control over
+ * our address space and the initial mappings may not map PAGE_OFFSET
+ * to __MEMORY_START (or even map all of our RAM).
+ *
+ * Once we've set up cached and uncached mappings we clear the rest of
+ * the PMB entries. This clearing also deals with the fact that PMB
+ * entries can persist across reboots. The PMB could have been left in
+ * any state when the reboot occurred, so to be safe we clear all
+ * entries and start with a clean slate.
+ *
+ * The uncached mapping is constructed using the smallest possible
+ * mapping with a single unbufferable page. Only the kernel text needs
+ * to be covered via the uncached mapping so that certain functions
+ * can be run uncached.
+ *
+ * Drivers and the like that have previously abused the 1:1 identity
+ * mapping are unsupported in 32-bit mode and must specify their
+ * caching preference when page tables are constructed.
+ *
+ * This frees up the P2 space for more nefarious purposes.
+ *
+ * Register utilization is as follows:
+ *
+ *	r0 = PMB_DATA data field
+ *	r1 = PMB_DATA address field
+ *	r2 = PMB_ADDR data field
+ *	r3 = PMB_ADDR address field
+ *	r4 = PMB_E_SHIFT
+ *	r5 = remaining amount of RAM to map
+ *	r6 = PMB mapping size we're trying to use
+ *	r7 = cached_to_uncached
+ *	r8 = scratch register
+ *	r9 = scratch register
+ *	r10 = number of PMB entries we've set up
+ */
+
+	mov.l	.LMMUCR, r1	/* Flush the TLB */
+	mov.l	@r1, r0
+	or	#MMUCR_TI, r0
+	mov.l	r0, @r1
+
+	mov.l	.LMEMORY_SIZE, r5
+	mov	r5, r7
+
+	mov	#PMB_E_SHIFT, r0
+	mov	#0x1, r4
+	shld	r0, r4
 
+	mov.l	.LFIRST_DATA_ENTRY, r0
+	mov.l	.LPMB_DATA, r1
+	mov.l	.LFIRST_ADDR_ENTRY, r2
+	mov.l	.LPMB_ADDR, r3
+
+	mov	#0, r10
+
+	/*
+	 * Uncached mapping
+	 */
+	mov	#(PMB_SZ_16M >> 2), r9
+	shll2	r9
+
+	mov	#(PMB_UB >> 8), r8
+	shll8	r8
+
+	or	r0, r8
+	or	r9, r8
+	mov.l	r8, @r1
+	mov	r2, r8
+	add	r7, r8
+	mov.l	r8, @r3
+
+	add	r4, r1
+	add	r4, r3
+	add	#1, r10
+
+/*
+ * Iterate over all of the available sizes from largest to
+ * smallest for constructing the cached mapping.
+ */
 #define __PMB_ITER_BY_SIZE(size)		\
 .L##size:					\
 	mov	#(size >> 4), r6;		\
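
Taken together, the "Uncached mapping" block added above boils down to: build a data word (PPN plus size/buffering/valid flags) in r8 and store it through r1 (PMB_DATA), build the matching address word (VPN, offset by r7) and store it through r3 (PMB_ADDR), and step to the next entry's registers, which sit 1 << PMB_E_SHIFT bytes apart (r4). A standalone C sketch of that pattern follows; the register bases, flag values, stride, and memory size are placeholders standing in for the real <asm/mmu.h> definitions, not verified constants.

#include <stdio.h>

/*
 * Placeholder constants standing in for the real <asm/mmu.h>
 * definitions; the actual values are NOT verified here.
 */
#define PMB_ADDR_BASE	0xf6100000UL	/* assumed PMB_ADDR register base */
#define PMB_DATA_BASE	0xf7100000UL	/* assumed PMB_DATA register base */
#define PMB_E_SHIFT	8		/* assumed per-entry stride shift */
#define PMB_V		0x00000100UL	/* assumed "valid" bit */
#define PMB_UB		0x00000200UL	/* assumed "unbuffered" bit */
#define PMB_SZ_16M	0x00000010UL	/* assumed 16MB size encoding */

/* MMIO-style store; on real hardware this would hit the PMB array. */
static void pmb_write(unsigned long reg, unsigned long val)
{
	printf("write 0x%08lx -> [0x%08lx]\n", val, reg);
}

/*
 * Mirror of the uncached-mapping block: one 16MB, unbuffered entry.
 * The data word carries the PPN plus flags (what the assembly builds
 * in r8 from r0/r9), the address word carries the VPN, and each
 * entry's register pair sits 1 << PMB_E_SHIFT bytes apart (r4).
 */
static void pmb_set_uncached(int entry, unsigned long vpn, unsigned long ppn)
{
	unsigned long stride = 1UL << PMB_E_SHIFT;

	pmb_write(PMB_DATA_BASE + entry * stride,
		  ppn | PMB_SZ_16M | PMB_UB | PMB_V);
	pmb_write(PMB_ADDR_BASE + entry * stride, vpn | PMB_V);
}

int main(void)
{
	/* Uncached alias placed MEMORY_SIZE (assumed 128MB) above the
	   cached base, as "mov r2, r8; add r7, r8" does above. */
	pmb_set_uncached(0, 0x80000000UL + 0x08000000UL, 0x00000000UL);
	return 0;
}
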
@@ -115,26 +204,6 @@ ENTRY(_stext)
 	/* Increment number of PMB entries */	\
 	add	#1, r10;			\
 						\
-	/*					\
-	 * Uncached mapping			\
-	 */					\
-	mov	#(PMB_UB >> 8), r8;		\
-	shll8	r8;				\
-						\
-	or	r0, r8;				\
-	or	r9, r8;				\
-	mov.l	r8, @r1;			\
-	mov	r2, r8;				\
-	add	r7, r8;				\
-	mov.l	r8, @r3;			\
-						\
-	/* Increment to the next PMB_DATA entry */ \
-	add	r4, r1;				\
-	/* Increment to the next PMB_ADDR entry */ \
-	add	r4, r3;				\
-	/* Increment number of PMB entries */	\
-	add	#1, r10;			\
-						\
 	sub	r6, r5;				\
 	add	r6, r0;				\
 	add	r6, r2;				\
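
With its per-iteration uncached block deleted, __PMB_ITER_BY_SIZE reduces to a greedy fitting loop: for each supported size, map entries of that size while enough RAM remains, advancing the PPN field, the VPN field, and the remaining-RAM counter each time (the sub r6, r5 / add r6, r0 / add r6, r2 tail just above). A minimal C rendering of that control flow, with pmb_set_cached() as an invented stub rather than a kernel function:

#include <stdio.h>

/* Invented stub standing in for programming one cached PMB entry. */
static void pmb_set_cached(int entry, unsigned long vpn,
			   unsigned long ppn, unsigned long size_mb)
{
	printf("entry %d: 0x%08lx -> 0x%08lx, %luMB cached\n",
	       entry, vpn, ppn, size_mb);
}

/*
 * Greedy construction of the cached mapping, as the unrolled
 * __PMB_ITER_BY_SIZE(512/128/64/16) sequence does it: try each size
 * from largest to smallest, emitting as many entries of that size as
 * still fit in the remaining RAM.
 */
static int pmb_map_cached(unsigned long vpn, unsigned long ppn,
			  unsigned long remaining_mb, int entry)
{
	static const unsigned long sizes_mb[] = { 512, 128, 64, 16 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes_mb) / sizeof(sizes_mb[0]); i++) {
		while (remaining_mb >= sizes_mb[i]) {
			pmb_set_cached(entry++, vpn, ppn, sizes_mb[i]);
			remaining_mb -= sizes_mb[i];	/* sub r6, r5 */
			ppn += sizes_mb[i] << 20;	/* add r6, r0 */
			vpn += sizes_mb[i] << 20;	/* add r6, r2 */
		}
	}
	return entry;	/* running entry count, i.e. r10 */
}

int main(void)
{
	/* e.g. 192MB of RAM: one 128MB entry plus one 64MB entry,
	   starting at entry 1 since entry 0 is the uncached mapping */
	pmb_map_cached(0x80000000UL, 0x00000000UL, 192, 1);
	return 0;
}
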
@@ -142,68 +211,14 @@ ENTRY(_stext)
 	bra	.L##size;			\
 9999:
 
-	/*
-	 * Reconfigure the initial PMB mappings setup by the hardware.
-	 *
-	 * When we boot in 32-bit MMU mode there are 2 PMB entries already
-	 * setup for us.
-	 *
-	 * Entry  VPN         PPN         V   SZ      C   UB  WT
-	 * ---------------------------------------------------------------
-	 *   0    0x80000000  0x00000000  1   512MB   1   0   1
-	 *   1    0xA0000000  0x00000000  1   512MB   0   0   0
-	 *
-	 * But we reprogram them here because we want complete control over
-	 * our address space and the initial mappings may not map PAGE_OFFSET
-	 * to __MEMORY_START (or even map all of our RAM).
-	 *
-	 * Once we've setup cached and uncached mappings for all of RAM we
-	 * clear the rest of the PMB entries.
-	 *
-	 * This clearing also deals with the fact that PMB entries can persist
-	 * across reboots. The PMB could have been left in any state when the
-	 * reboot occurred, so to be safe we clear all entries and start with
-	 * with a clean slate.
-	 */
-
-	mov.l	.LMMUCR, r1	/* Flush the TLB */
-	mov.l	@r1, r0
-	or	#MMUCR_TI, r0
-	mov.l	r0, @r1
-
-	mov.l	.LMEMORY_SIZE, r5
-	mov	r5, r7
-
-	mov	#PMB_E_SHIFT, r0
-	mov	#0x1, r4
-	shld	r0, r4
-
-	mov.l	.LFIRST_DATA_ENTRY, r0
-	mov.l	.LPMB_DATA, r1
-	mov.l	.LFIRST_ADDR_ENTRY, r2
-	mov.l	.LPMB_ADDR, r3
-
-	mov	#0, r10
-
-	/*
-	 * r0 = PMB_DATA data field
-	 * r1 = PMB_DATA address field
-	 * r2 = PMB_ADDR data field
-	 * r3 = PMB_ADDR address field
-	 * r4 = PMB_E_SHIFT
-	 * r5 = remaining amount of RAM to map
-	 * r6 = PMB mapping size we're trying to use
-	 * r7 = cached_to_uncached
-	 * r8 = scratch register
-	 * r9 = scratch register
-	 * r10 = number of PMB entries we've setup
-	 */
 	__PMB_ITER_BY_SIZE(512)
 	__PMB_ITER_BY_SIZE(128)
 	__PMB_ITER_BY_SIZE(64)
 	__PMB_ITER_BY_SIZE(16)
 
-	/* Update cached_to_uncached */
+	/*
+	 * Now that we can access it, update cached_to_uncached.
+	 */
 	mov.l	.Lcached_to_uncached, r0
 	mov.l	r7, @r0
 
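
For context on that final store: r7 has held the memory size since just after the TLB flush, and the same value was added to the uncached entry's VPN, so cached_to_uncached is literally the byte offset between the cached mapping and its uncached alias. It can only be written back once a cached mapping covering the variable exists, hence "Now that we can access it". A hedged sketch of how such an offset is typically consumed; uncached_alias() is an invented name, not the kernel's actual API:

#include <stdio.h>

/* Stand-in for the kernel variable updated in the hunk above;
   the 128MB value is an assumed memory size, not a real constant. */
static unsigned long cached_to_uncached = 0x08000000UL;

/*
 * Turn a cached kernel-text address into its uncached alias by adding
 * the offset, so the access goes through the unbuffered PMB entry.
 * Invented helper for illustration only.
 */
static void *uncached_alias(void *cached)
{
	return (void *)((unsigned long)cached + cached_to_uncached);
}

int main(void)
{
	void *text = (void *)0x80001000UL;	/* some cached kernel address */

	printf("uncached alias of %p is %p\n", text, uncached_alias(text));
	return 0;
}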