path: root/arch/sh/mm/pmb.c
author	Paul Mundt <lethal@linux-sh.org>	2010-01-13 04:31:48 -0500
committer	Paul Mundt <lethal@linux-sh.org>	2010-01-13 04:31:48 -0500
commit	a0ab36689a36e583b6e736f1c99ac8c9aebdad59 (patch)
tree	3efc6236d7ec0b5dbeeb311b8cd923c8e1e7f88c /arch/sh/mm/pmb.c
parent	7f33306ee57bce9c79825e89c457a91025aa5aad (diff)
sh: fixed PMB mode refactoring.
This introduces some much overdue chainsawing of the fixed PMB support. fixed PMB was introduced initially to work around the fact that dynamic PMB mode was relatively broken, though they were never intended to converge. The main areas of difference are whether the system is booted in 29-bit mode or 32-bit mode, and whether legacy mappings are to be preserved. Any system booting in true 32-bit mode will not care about legacy mappings, so these are roughly decoupled.

Regardless of the entry point, PMB and 32BIT are directly related as far as the kernel is concerned, so we also switch back to having one select the other.

With legacy mappings iterated through and applied in the initialization path it's now possible to finally merge the two implementations and permit dynamic remapping on top of the remaining entries, regardless of whether boot mappings are crafted by hand or inherited from the boot loader.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
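For orientation, the merged initialization flow this patch introduces can be summarized roughly as follows. This is only a condensed sketch of the pmb_init() path taken from the hunks below; the PMB_PASCR/PMB_IRMCR register programming and the TLB flush are abbreviated, and no behaviour beyond what appears in the diff is implied.

/*
 * Condensed sketch (not the full patch): reuse any legacy boot
 * mappings when available, otherwise establish the P1/P2 windows
 * dynamically via pmb_remap().
 */
int __uses_jump_to_uncached pmb_init(void)
{
	unsigned long size, ret;

	jump_to_uncached();

	/* Legacy boot mappings applied? Then keep them and return early. */
	if (pmb_apply_legacy_mappings() == 0) {
		back_to_cached();
		return 0;
	}

	/* Otherwise map memory through P1 (cached) and P2 (uncached). */
	size = (unsigned long)__MEMORY_START + __MEMORY_SIZE;

	ret = pmb_remap(P1SEG, 0x00000000, size, PMB_C);
	BUG_ON(ret != size);

	ret = pmb_remap(P2SEG, 0x00000000, size, PMB_WT | PMB_UB);
	BUG_ON(ret != size);

	/* ... register setup and TLB flush omitted in this sketch ... */

	back_to_cached();
	return 0;
}

When CONFIG_PMB_LEGACY is not set, pmb_apply_legacy_mappings() is a stub that returns 1, so the P1/P2 windows are always established dynamically.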
Diffstat (limited to 'arch/sh/mm/pmb.c')
-rw-r--r--	arch/sh/mm/pmb.c	| 106
1 file changed, 61 insertions(+), 45 deletions(-)
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 280f6a166035..8f7dbf183fb0 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -3,7 +3,7 @@
  *
  * Privileged Space Mapping Buffer (PMB) Support.
  *
- * Copyright (C) 2005, 2006, 2007 Paul Mundt
+ * Copyright (C) 2005 - 2010 Paul Mundt
  *
  * P1/P2 Section mapping definitions from map32.h, which was:
  *
@@ -279,51 +279,12 @@ static void __pmb_unmap(struct pmb_entry *pmbe)
 	} while (pmbe);
 }
 
-#ifdef CONFIG_PMB
-int __uses_jump_to_uncached pmb_init(void)
-{
-	unsigned int i;
-	long size, ret;
-
-	jump_to_uncached();
-
-	/*
-	 * Insert PMB entries for the P1 and P2 areas so that, after
-	 * we've switched the MMU to 32-bit mode, the semantics of P1
-	 * and P2 are the same as in 29-bit mode, e.g.
-	 *
-	 *	P1 - provides a cached window onto physical memory
-	 *	P2 - provides an uncached window onto physical memory
-	 */
-	size = __MEMORY_START + __MEMORY_SIZE;
-
-	ret = pmb_remap(P1SEG, 0x00000000, size, PMB_C);
-	BUG_ON(ret != size);
-
-	ret = pmb_remap(P2SEG, 0x00000000, size, PMB_WT | PMB_UB);
-	BUG_ON(ret != size);
-
-	ctrl_outl(0, PMB_IRMCR);
-
-	/* PMB.SE and UB[7] */
-	ctrl_outl(PASCR_SE | (1 << 7), PMB_PASCR);
-
-	/* Flush out the TLB */
-	i = ctrl_inl(MMUCR);
-	i |= MMUCR_TI;
-	ctrl_outl(i, MMUCR);
-
-	back_to_cached();
-
-	return 0;
-}
-#else
-int __uses_jump_to_uncached pmb_init(void)
+#ifdef CONFIG_PMB_LEGACY
+static int pmb_apply_legacy_mappings(void)
 {
 	int i;
 	unsigned long addr, data;
-
-	jump_to_uncached();
+	unsigned int applied = 0;
 
 	for (i = 0; i < PMB_ENTRY_MAX; i++) {
 		struct pmb_entry *pmbe;
@@ -357,13 +318,69 @@ int __uses_jump_to_uncached pmb_init(void)
 
 		pmbe = pmb_alloc(vpn, ppn, flags, i);
 		WARN_ON(IS_ERR(pmbe));
+
+		applied++;
+	}
+
+	return (applied == 0);
+}
+#else
+static inline int pmb_apply_legacy_mappings(void)
+{
+	return 1;
+}
+#endif
+
+int __uses_jump_to_uncached pmb_init(void)
+{
+	unsigned int i;
+	unsigned long size, ret;
+
+	jump_to_uncached();
+
+	/*
+	 * Attempt to apply the legacy boot mappings if configured. If
+	 * this is successful then we simply carry on with those and
+	 * don't bother establishing additional memory mappings. Dynamic
+	 * device mappings through pmb_remap() can still be bolted on
+	 * after this.
+	 */
+	ret = pmb_apply_legacy_mappings();
+	if (ret == 0) {
+		back_to_cached();
+		return 0;
 	}
 
+	/*
+	 * Insert PMB entries for the P1 and P2 areas so that, after
+	 * we've switched the MMU to 32-bit mode, the semantics of P1
+	 * and P2 are the same as in 29-bit mode, e.g.
+	 *
+	 *	P1 - provides a cached window onto physical memory
+	 *	P2 - provides an uncached window onto physical memory
+	 */
+	size = (unsigned long)__MEMORY_START + __MEMORY_SIZE;
+
+	ret = pmb_remap(P1SEG, 0x00000000, size, PMB_C);
+	BUG_ON(ret != size);
+
+	ret = pmb_remap(P2SEG, 0x00000000, size, PMB_WT | PMB_UB);
+	BUG_ON(ret != size);
+
+	ctrl_outl(0, PMB_IRMCR);
+
+	/* PMB.SE and UB[7] */
+	ctrl_outl(PASCR_SE | (1 << 7), PMB_PASCR);
+
+	/* Flush out the TLB */
+	i = ctrl_inl(MMUCR);
+	i |= MMUCR_TI;
+	ctrl_outl(i, MMUCR);
+
 	back_to_cached();
 
 	return 0;
 }
-#endif /* CONFIG_PMB */
 
 static int pmb_seq_show(struct seq_file *file, void *iter)
 {
@@ -462,6 +479,5 @@ static int __init pmb_sysdev_init(void)
 {
 	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
 }
-
 subsys_initcall(pmb_sysdev_init);
 #endif