aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMatt Fleming <matt@console-pimps.org>2010-01-18 05:33:10 -0500
committerPaul Mundt <lethal@linux-sh.org>2010-01-18 05:33:10 -0500
commit3d467676abf5f01f5ee99056273a58486968e252 (patch)
tree4824c0f9a6dc7ade813a58f6ac4fa1e3780f035e
parent7dcaa8e8e67b2cfbe0097c9bb52e23aed5443b8b (diff)
sh: Setup early PMB mappings.
More and more boards are going to start shipping that boot with the MMU in 32BIT mode by default. Previously we relied on the bootloader to setup PMB mappings for use by the kernel but we also need to cater for boards whose bootloaders don't set them up. If CONFIG_PMB_LEGACY is not enabled we have full control over our PMB mappings and can compress our address space. Usually, the distance between the cached and uncached mappings of RAM is always 512MB, however we can compress the distance to be the amount of RAM on the board. pmb_init() now becomes much simpler. It no longer has to calculate any mappings, it just has to synchronise the software PMB table with the hardware. Tested on SDK7786 and SH7785LCR. Signed-off-by: Matt Fleming <matt@console-pimps.org> Signed-off-by: Paul Mundt <lethal@linux-sh.org>
-rw-r--r--arch/sh/kernel/head_32.S241
-rw-r--r--arch/sh/mm/pmb.c156
2 files changed, 346 insertions, 51 deletions
diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S
index e5d421db4c83..8ee31a0b973e 100644
--- a/arch/sh/kernel/head_32.S
+++ b/arch/sh/kernel/head_32.S
@@ -3,6 +3,7 @@
3 * arch/sh/kernel/head.S 3 * arch/sh/kernel/head.S
4 * 4 *
5 * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima 5 * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
6 * Copyright (C) 2010 Matt Fleming
6 * 7 *
7 * This file is subject to the terms and conditions of the GNU General Public 8 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive 9 * License. See the file "COPYING" in the main directory of this archive
@@ -84,6 +85,236 @@ ENTRY(_stext)
84 ldc r0, r7_bank ! ... and initial thread_info 85 ldc r0, r7_bank ! ... and initial thread_info
85#endif 86#endif
86 87
88#if defined(CONFIG_PMB) && !defined(CONFIG_PMB_LEGACY)
89 /*
90 * Reconfigure the initial PMB mappings setup by the hardware.
91 *
92 * When we boot in 32-bit MMU mode there are 2 PMB entries already
93 * setup for us.
94 *
95 * Entry VPN PPN V SZ C UB WT
96 * ---------------------------------------------------------------
97 * 0 0x80000000 0x00000000 1 512MB 1 0 1
98 * 1 0xA0000000 0x00000000 1 512MB 0 0 0
99 *
100 * But we reprogram them here because we want complete control over
101 * our address space and the initial mappings may not map PAGE_OFFSET
102 * to __MEMORY_START (or even map all of our RAM).
103 *
104 * Once we've setup cached and uncached mappings for all of RAM we
105 * clear the rest of the PMB entries.
106 *
107 * This clearing also deals with the fact that PMB entries can persist
108 * across reboots. The PMB could have been left in any state when the
 109 * reboot occurred, so to be safe we clear all entries and start
 110 * with a clean slate.
111 */
112
113 mov.l .LMMUCR, r1 /* Flush the TLB */
114 mov.l @r1, r0
115 or #MMUCR_TI, r0
116 mov.l r0, @r1
117
118 mov.l .LMEMORY_SIZE, r5
119 mov r5, r7
120
121 mov #PMB_E_SHIFT, r0
122 mov #0x1, r4
123 shld r0, r4
124
125 mov.l .LFIRST_DATA_ENTRY, r0
126 mov.l .LPMB_DATA, r1
127 mov.l .LFIRST_ADDR_ENTRY, r2
128 mov.l .LPMB_ADDR, r3
129
130 mov #0, r10
131
132 /*
133 * r0 = PMB_DATA data field
134 * r1 = PMB_DATA address field
135 * r2 = PMB_ADDR data field
136 * r3 = PMB_ADDR address field
137 * r4 = PMB_E_SHIFT
138 * r5 = remaining amount of RAM to map
139 * r6 = PMB mapping size we're trying to use
140 * r7 = cached_to_uncached
141 * r8 = scratch register
142 * r9 = scratch register
143 * r10 = number of PMB entries we've setup
144 */
145.L512:
146 mov #(512 >> 4), r6
147 shll16 r6
148 shll8 r6
149
150 cmp/hi r5, r6
151 bt .L128
152
153 mov #(PMB_SZ_512M >> 2), r9
154 shll2 r9
155
156 /*
157 * Cached mapping
158 */
159 mov #PMB_C, r8
160 or r0, r8
161 or r9, r8
162 mov.l r8, @r1
163 mov.l r2, @r3
164
165 add r4, r1 /* Increment to the next PMB_DATA entry */
166 add r4, r3 /* Increment to the next PMB_ADDR entry */
167
168 add #1, r10 /* Increment number of PMB entries */
169
170 /*
171 * Uncached mapping
172 */
173 mov #(PMB_UB >> 8), r8
174 shll8 r8
175
176 or r0, r8
177 or r9, r8
178 mov.l r8, @r1
179 mov r2, r8
180 add r7, r8
181 mov.l r8, @r3
182
183 add r4, r1 /* Increment to the next PMB_DATA entry */
184 add r4, r3 /* Increment to the next PMB_ADDR entry */
185
186 add #1, r10 /* Increment number of PMB entries */
187
188 sub r6, r5
189 add r6, r0
190 add r6, r2
191
192 bra .L512
193
194.L128:
195 mov #(128 >> 4), r6
196 shll16 r6
197 shll8 r6
198
199 cmp/hi r5, r6
200 bt .L64
201
202 mov #(PMB_SZ_128M >> 2), r9
203 shll2 r9
204
205 /*
206 * Cached mapping
207 */
208 mov #PMB_C, r8
209 or r0, r8
210 or r9, r8
211 mov.l r8, @r1
212 mov.l r2, @r3
213
214 add r4, r1 /* Increment to the next PMB_DATA entry */
215 add r4, r3 /* Increment to the next PMB_ADDR entry */
216
217 add #1, r10 /* Increment number of PMB entries */
218
219 /*
220 * Uncached mapping
221 */
222 mov #(PMB_UB >> 8), r8
223 shll8 r8
224
225 or r0, r8
226 or r9, r8
227 mov.l r8, @r1
228 mov r2, r8
229 add r7, r8
230 mov.l r8, @r3
231
232 add r4, r1 /* Increment to the next PMB_DATA entry */
233 add r4, r3 /* Increment to the next PMB_ADDR entry */
234
235 add #1, r10 /* Increment number of PMB entries */
236
237 sub r6, r5
238 add r6, r0
239 add r6, r2
240
241 bra .L128
242
243.L64:
244 mov #(64 >> 4), r6
245 shll16 r6
246 shll8 r6
247
248 cmp/hi r5, r6
249 bt .Ldone
250
251 mov #(PMB_SZ_64M >> 2), r9
252 shll2 r9
253
254 /*
255 * Cached mapping
256 */
257 mov #PMB_C, r8
258 or r0, r8
259 or r9, r8
260 mov.l r8, @r1
261 mov.l r2, @r3
262
263 add r4, r1 /* Increment to the next PMB_DATA entry */
264 add r4, r3 /* Increment to the next PMB_ADDR entry */
265
266 add #1, r10 /* Increment number of PMB entries */
267
268 /*
269 * Uncached mapping
270 */
271 mov #(PMB_UB >> 8), r8
272 shll8 r8
273
274 or r0, r8
275 or r9, r8
276 mov.l r8, @r1
277 mov r2, r8
278 add r7, r8
279 mov.l r8, @r3
280
281 add r4, r1 /* Increment to the next PMB_DATA entry */
282 add r4, r3 /* Increment to the next PMB_ADDR entry */
283
284 add #1, r10 /* Increment number of PMB entries */
285
286 sub r6, r5
287 add r6, r0
288 add r6, r2
289
290 bra .L64
291
292.Ldone:
293 /* Update cached_to_uncached */
294 mov.l .Lcached_to_uncached, r0
295 mov.l r7, @r0
296
297 /*
298 * Clear the remaining PMB entries.
299 *
300 * r3 = entry to begin clearing from
301 * r10 = number of entries we've setup so far
302 */
303 mov #0, r1
304 mov #PMB_ENTRY_MAX, r0
305
306.Lagain:
307 mov.l r1, @r3 /* Clear PMB_ADDR entry */
308 add #1, r10 /* Increment the loop counter */
309 cmp/eq r0, r10
310 bf/s .Lagain
311 add r4, r3 /* Increment to the next PMB_ADDR entry */
312
313 mov.l 6f, r0
314 icbi @r0
315
316#endif /* !CONFIG_PMB_LEGACY */
317
87#ifndef CONFIG_SH_NO_BSS_INIT 318#ifndef CONFIG_SH_NO_BSS_INIT
88 /* 319 /*
89 * Don't clear BSS if running on slow platforms such as an RTL simulation, 320 * Don't clear BSS if running on slow platforms such as an RTL simulation,
@@ -133,3 +364,13 @@ ENTRY(stack_start)
1335: .long start_kernel 3645: .long start_kernel
1346: .long sh_cpu_init 3656: .long sh_cpu_init
1357: .long init_thread_union 3667: .long init_thread_union
367
368#if defined(CONFIG_PMB) && !defined(CONFIG_PMB_LEGACY)
369.LPMB_ADDR: .long PMB_ADDR
370.LPMB_DATA: .long PMB_DATA
371.LFIRST_ADDR_ENTRY: .long PAGE_OFFSET | PMB_V
372.LFIRST_DATA_ENTRY: .long __MEMORY_START | PMB_V
373.LMMUCR: .long MMUCR
374.Lcached_to_uncached: .long cached_to_uncached
375.LMEMORY_SIZE: .long __MEMORY_SIZE
376#endif
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 8f7dbf183fb0..b796b6c021b4 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -3,11 +3,8 @@
3 * 3 *
4 * Privileged Space Mapping Buffer (PMB) Support. 4 * Privileged Space Mapping Buffer (PMB) Support.
5 * 5 *
6 * Copyright (C) 2005 - 2010 Paul Mundt 6 * Copyright (C) 2005 - 2010 Paul Mundt
7 * 7 * Copyright (C) 2010 Matt Fleming
8 * P1/P2 Section mapping definitions from map32.h, which was:
9 *
10 * Copyright 2003 (c) Lineo Solutions,Inc.
11 * 8 *
12 * This file is subject to the terms and conditions of the GNU General Public 9 * This file is subject to the terms and conditions of the GNU General Public
13 * License. See the file "COPYING" in the main directory of this archive 10 * License. See the file "COPYING" in the main directory of this archive
@@ -280,46 +277,82 @@ static void __pmb_unmap(struct pmb_entry *pmbe)
280} 277}
281 278
282#ifdef CONFIG_PMB_LEGACY 279#ifdef CONFIG_PMB_LEGACY
280static inline unsigned int pmb_ppn_in_range(unsigned long ppn)
281{
282 return ppn >= __MEMORY_START && ppn < __MEMORY_START + __MEMORY_SIZE;
283}
284
283static int pmb_apply_legacy_mappings(void) 285static int pmb_apply_legacy_mappings(void)
284{ 286{
285 int i;
286 unsigned long addr, data;
287 unsigned int applied = 0; 287 unsigned int applied = 0;
288 int i;
288 289
289 for (i = 0; i < PMB_ENTRY_MAX; i++) { 290 pr_info("PMB: Preserving legacy mappings:\n");
290 struct pmb_entry *pmbe;
291 unsigned long vpn, ppn, flags;
292
293 addr = PMB_DATA + (i << PMB_E_SHIFT);
294 data = ctrl_inl(addr);
295 if (!(data & PMB_V))
296 continue;
297 291
298 if (data & PMB_C) { 292 /*
299#if defined(CONFIG_CACHE_WRITETHROUGH) 293 * The following entries are setup by the bootloader.
300 data |= PMB_WT; 294 *
301#elif defined(CONFIG_CACHE_WRITEBACK) 295 * Entry VPN PPN V SZ C UB
302 data &= ~PMB_WT; 296 * --------------------------------------------------------
303#else 297 * 0 0xA0000000 0x00000000 1 64MB 0 0
304 data &= ~(PMB_C | PMB_WT); 298 * 1 0xA4000000 0x04000000 1 16MB 0 0
305#endif 299 * 2 0xA6000000 0x08000000 1 16MB 0 0
306 } 300 * 9 0x88000000 0x48000000 1 128MB 1 1
307 ctrl_outl(data, addr); 301 * 10 0x90000000 0x50000000 1 128MB 1 1
308 302 * 11 0x98000000 0x58000000 1 128MB 1 1
309 ppn = data & PMB_PFN_MASK; 303 * 13 0xA8000000 0x48000000 1 128MB 0 0
304 * 14 0xB0000000 0x50000000 1 128MB 0 0
305 * 15 0xB8000000 0x58000000 1 128MB 0 0
306 *
 307 * The only entries that we need are the ones that map the kernel
308 * at the cached and uncached addresses.
309 */
310 for (i = 0; i < PMB_ENTRY_MAX; i++) {
311 unsigned long addr, data;
312 unsigned long addr_val, data_val;
313 unsigned long ppn, vpn;
310 314
311 flags = data & (PMB_C | PMB_WT | PMB_UB); 315 addr = mk_pmb_addr(i);
312 flags |= data & PMB_SZ_MASK; 316 data = mk_pmb_data(i);
313 317
314 addr = PMB_ADDR + (i << PMB_E_SHIFT); 318 addr_val = __raw_readl(addr);
315 data = ctrl_inl(addr); 319 data_val = __raw_readl(data);
316 320
317 vpn = data & PMB_PFN_MASK; 321 /*
322 * Skip over any bogus entries
323 */
324 if (!(data_val & PMB_V) || !(addr_val & PMB_V))
325 continue;
318 326
319 pmbe = pmb_alloc(vpn, ppn, flags, i); 327 ppn = data_val & PMB_PFN_MASK;
320 WARN_ON(IS_ERR(pmbe)); 328 vpn = addr_val & PMB_PFN_MASK;
321 329
322 applied++; 330 /*
331 * Only preserve in-range mappings.
332 */
333 if (pmb_ppn_in_range(ppn)) {
334 unsigned int size;
335 char *sz_str = NULL;
336
337 size = data_val & PMB_SZ_MASK;
338
339 sz_str = (size == PMB_SZ_16M) ? " 16MB":
340 (size == PMB_SZ_64M) ? " 64MB":
341 (size == PMB_SZ_128M) ? "128MB":
342 "512MB";
343
344 pr_info("\t0x%08lx -> 0x%08lx [ %s %scached ]\n",
345 vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, sz_str,
346 (data_val & PMB_C) ? "" : "un");
347
348 applied++;
349 } else {
350 /*
351 * Invalidate anything out of bounds.
352 */
353 __raw_writel(addr_val & ~PMB_V, addr);
354 __raw_writel(data_val & ~PMB_V, data);
355 }
323 } 356 }
324 357
325 return (applied == 0); 358 return (applied == 0);
@@ -333,8 +366,9 @@ static inline int pmb_apply_legacy_mappings(void)
333 366
334int __uses_jump_to_uncached pmb_init(void) 367int __uses_jump_to_uncached pmb_init(void)
335{ 368{
336 unsigned int i; 369 int i;
337 unsigned long size, ret; 370 unsigned long addr, data;
371 unsigned long ret;
338 372
339 jump_to_uncached(); 373 jump_to_uncached();
340 374
@@ -352,25 +386,45 @@ int __uses_jump_to_uncached pmb_init(void)
352 } 386 }
353 387
354 /* 388 /*
355 * Insert PMB entries for the P1 and P2 areas so that, after 389 * Sync our software copy of the PMB mappings with those in
356 * we've switched the MMU to 32-bit mode, the semantics of P1 390 * hardware. The mappings in the hardware PMB were either set up
357 * and P2 are the same as in 29-bit mode, e.g. 391 * by the bootloader or very early on by the kernel.
358 *
359 * P1 - provides a cached window onto physical memory
360 * P2 - provides an uncached window onto physical memory
361 */ 392 */
362 size = (unsigned long)__MEMORY_START + __MEMORY_SIZE; 393 for (i = 0; i < PMB_ENTRY_MAX; i++) {
394 struct pmb_entry *pmbe;
395 unsigned long vpn, ppn, flags;
363 396
364 ret = pmb_remap(P1SEG, 0x00000000, size, PMB_C); 397 addr = PMB_DATA + (i << PMB_E_SHIFT);
365 BUG_ON(ret != size); 398 data = ctrl_inl(addr);
399 if (!(data & PMB_V))
400 continue;
366 401
367 ret = pmb_remap(P2SEG, 0x00000000, size, PMB_WT | PMB_UB); 402 if (data & PMB_C) {
368 BUG_ON(ret != size); 403#if defined(CONFIG_CACHE_WRITETHROUGH)
404 data |= PMB_WT;
405#elif defined(CONFIG_CACHE_WRITEBACK)
406 data &= ~PMB_WT;
407#else
408 data &= ~(PMB_C | PMB_WT);
409#endif
410 }
411 ctrl_outl(data, addr);
369 412
370 ctrl_outl(0, PMB_IRMCR); 413 ppn = data & PMB_PFN_MASK;
414
415 flags = data & (PMB_C | PMB_WT | PMB_UB);
416 flags |= data & PMB_SZ_MASK;
371 417
372 /* PMB.SE and UB[7] */ 418 addr = PMB_ADDR + (i << PMB_E_SHIFT);
373 ctrl_outl(PASCR_SE | (1 << 7), PMB_PASCR); 419 data = ctrl_inl(addr);
420
421 vpn = data & PMB_PFN_MASK;
422
423 pmbe = pmb_alloc(vpn, ppn, flags, i);
424 WARN_ON(IS_ERR(pmbe));
425 }
426
427 ctrl_outl(0, PMB_IRMCR);
374 428
375 /* Flush out the TLB */ 429 /* Flush out the TLB */
376 i = ctrl_inl(MMUCR); 430 i = ctrl_inl(MMUCR);