path: root/arch/sh/mm
author     Paul Mundt <lethal@linux-sh.org>  2010-01-13 04:31:48 -0500
committer  Paul Mundt <lethal@linux-sh.org>  2010-01-13 04:31:48 -0500
commit     a0ab36689a36e583b6e736f1c99ac8c9aebdad59 (patch)
tree       3efc6236d7ec0b5dbeeb311b8cd923c8e1e7f88c /arch/sh/mm
parent     7f33306ee57bce9c79825e89c457a91025aa5aad (diff)
sh: fixed PMB mode refactoring.
This introduces some much overdue chainsawing of the fixed PMB support.
fixed PMB was introduced initially to work around the fact that dynamic
PMB mode was relatively broken, though they were never intended to
converge. The main areas where there are differences are whether the
system is booted in 29-bit mode or 32-bit mode, and whether legacy
mappings are to be preserved. Any system booting in true 32-bit mode will
not care about legacy mappings, so these are roughly decoupled.

Regardless of the entry point, PMB and 32BIT are directly related as far
as the kernel is concerned, so we also switch back to having one select
the other. With legacy mappings iterated through and applied in the
initialization path it's now possible to finally merge the two
implementations and permit dynamic remapping overtop of remaining entries
regardless of whether boot mappings are crafted by hand or inherited from
the boot loader.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
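For orientation before the hunks below: the merged pmb_init() now tries
the boot mappings inherited from the boot loader first, and only crafts
the P1/P2 entries by hand when none apply. What follows is a minimal
userspace sketch of that control flow, not the kernel code itself: the
helper names mirror arch/sh/mm/pmb.c, but the bodies are stubs and the
PMB_LEGACY build flag is an illustrative stand-in for CONFIG_PMB_LEGACY.

/*
 * Standalone sketch of the merged pmb_init() flow introduced by this
 * patch. Build with -DPMB_LEGACY to model a kernel configured with
 * legacy boot mappings; the stub bodies are illustrative only.
 */
#include <stdio.h>

#ifdef PMB_LEGACY
/* Mirrors pmb_apply_legacy_mappings(): returns 0 when at least one
 * entry was inherited from the boot loader (the real code walks the
 * PMB_ENTRY_MAX hardware slots); non-zero when nothing usable exists. */
static int pmb_apply_legacy_mappings(void)
{
	int applied = 2;	/* pretend two boot mappings were found */
	return (applied == 0);
}
#else
static inline int pmb_apply_legacy_mappings(void)
{
	return 1;		/* no legacy support configured */
}
#endif

/* Stands in for the pmb_remap() calls that establish the P1 (cached)
 * and P2 (uncached) windows onto physical memory. */
static void pmb_establish_p1_p2(void)
{
	printf("mapping P1 (cached) and P2 (uncached) windows\n");
}

static int pmb_init(void)
{
	/* Try the inherited boot mappings first... */
	if (pmb_apply_legacy_mappings() == 0) {
		printf("legacy boot mappings applied, keeping them\n");
		return 0;
	}

	/* ...and only craft the P1/P2 entries by hand if none applied. */
	pmb_establish_p1_p2();
	return 0;
}

int main(void)
{
	return pmb_init();
}

In either case, dynamic device mappings through pmb_remap() can still be
bolted on afterwards, per the comment added to pmb_init() in the patch.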
Diffstat (limited to 'arch/sh/mm')
-rw-r--r--  arch/sh/mm/Kconfig   |  24
-rw-r--r--  arch/sh/mm/Makefile  |   2
-rw-r--r--  arch/sh/mm/pmb.c     | 106
3 files changed, 67 insertions(+), 65 deletions(-)
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 358c860aeb9b..860cd24b4205 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -80,30 +80,18 @@ config 32BIT
 	bool
 	default y if CPU_SH5
 
-config PMB_ENABLE
-	bool "Support 32-bit physical addressing through PMB"
-	depends on MMU && EXPERIMENTAL && CPU_SH4A && !CPU_SH4AL_DSP
-	help
-	  If you say Y here, physical addressing will be extended to
-	  32-bits through the SH-4A PMB. If this is not set, legacy
-	  29-bit physical addressing will be used.
-
-choice
-	prompt "PMB handling type"
-	depends on PMB_ENABLE
-	default PMB_FIXED
-
 config PMB
-	bool "PMB"
+	bool "Support 32-bit physical addressing through PMB"
 	depends on MMU && EXPERIMENTAL && CPU_SH4A && !CPU_SH4AL_DSP
+	select 32BIT
 	help
 	  If you say Y here, physical addressing will be extended to
 	  32-bits through the SH-4A PMB. If this is not set, legacy
 	  29-bit physical addressing will be used.
 
-config PMB_FIXED
-	bool "fixed PMB"
-	depends on MMU && EXPERIMENTAL && CPU_SH4A && !CPU_SH4AL_DSP
+config PMB_LEGACY
+	bool "Support legacy boot mappings for PMB"
+	depends on PMB
 	select 32BIT
 	help
 	  If this option is enabled, fixed PMB mappings are inherited
@@ -111,8 +99,6 @@ config PMB_FIXED
 	  management. This is the closest to legacy 29-bit physical mode,
 	  and allows systems to support up to 512MiB of system memory.
 
-endchoice
-
 config X2TLB
 	bool "Enable extended TLB mode"
 	depends on (CPU_SHX2 || CPU_SHX3) && MMU && EXPERIMENTAL
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index 9fa11d655044..edde8bdd681d 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -33,7 +33,7 @@ obj-y += $(tlb-y)
 endif
 
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
-obj-$(CONFIG_PMB_ENABLE)	+= pmb.o
+obj-$(CONFIG_PMB)		+= pmb.o
 obj-$(CONFIG_NUMA)		+= numa.o
 
 # Special flags for fault_64.o. This puts restrictions on the number of
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 280f6a166035..8f7dbf183fb0 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -3,7 +3,7 @@
  *
  * Privileged Space Mapping Buffer (PMB) Support.
  *
- * Copyright (C) 2005, 2006, 2007 Paul Mundt
+ * Copyright (C) 2005 - 2010 Paul Mundt
  *
  * P1/P2 Section mapping definitions from map32.h, which was:
  *
@@ -279,51 +279,12 @@ static void __pmb_unmap(struct pmb_entry *pmbe)
 	} while (pmbe);
 }
 
-#ifdef CONFIG_PMB
-int __uses_jump_to_uncached pmb_init(void)
-{
-	unsigned int i;
-	long size, ret;
-
-	jump_to_uncached();
-
-	/*
-	 * Insert PMB entries for the P1 and P2 areas so that, after
-	 * we've switched the MMU to 32-bit mode, the semantics of P1
-	 * and P2 are the same as in 29-bit mode, e.g.
-	 *
-	 *	P1 - provides a cached window onto physical memory
-	 *	P2 - provides an uncached window onto physical memory
-	 */
-	size = __MEMORY_START + __MEMORY_SIZE;
-
-	ret = pmb_remap(P1SEG, 0x00000000, size, PMB_C);
-	BUG_ON(ret != size);
-
-	ret = pmb_remap(P2SEG, 0x00000000, size, PMB_WT | PMB_UB);
-	BUG_ON(ret != size);
-
-	ctrl_outl(0, PMB_IRMCR);
-
-	/* PMB.SE and UB[7] */
-	ctrl_outl(PASCR_SE | (1 << 7), PMB_PASCR);
-
-	/* Flush out the TLB */
-	i = ctrl_inl(MMUCR);
-	i |= MMUCR_TI;
-	ctrl_outl(i, MMUCR);
-
-	back_to_cached();
-
-	return 0;
-}
-#else
-int __uses_jump_to_uncached pmb_init(void)
+#ifdef CONFIG_PMB_LEGACY
+static int pmb_apply_legacy_mappings(void)
 {
 	int i;
 	unsigned long addr, data;
-
-	jump_to_uncached();
+	unsigned int applied = 0;
 
 	for (i = 0; i < PMB_ENTRY_MAX; i++) {
 		struct pmb_entry *pmbe;
@@ -357,13 +318,69 @@ int __uses_jump_to_uncached pmb_init(void)
 
 		pmbe = pmb_alloc(vpn, ppn, flags, i);
 		WARN_ON(IS_ERR(pmbe));
+
+		applied++;
+	}
+
+	return (applied == 0);
+}
+#else
+static inline int pmb_apply_legacy_mappings(void)
+{
+	return 1;
+}
+#endif
+
+int __uses_jump_to_uncached pmb_init(void)
+{
+	unsigned int i;
+	unsigned long size, ret;
+
+	jump_to_uncached();
+
+	/*
+	 * Attempt to apply the legacy boot mappings if configured. If
+	 * this is successful then we simply carry on with those and
+	 * don't bother establishing additional memory mappings. Dynamic
+	 * device mappings through pmb_remap() can still be bolted on
+	 * after this.
+	 */
+	ret = pmb_apply_legacy_mappings();
+	if (ret == 0) {
+		back_to_cached();
+		return 0;
 	}
 
+	/*
+	 * Insert PMB entries for the P1 and P2 areas so that, after
+	 * we've switched the MMU to 32-bit mode, the semantics of P1
+	 * and P2 are the same as in 29-bit mode, e.g.
+	 *
+	 *	P1 - provides a cached window onto physical memory
+	 *	P2 - provides an uncached window onto physical memory
+	 */
+	size = (unsigned long)__MEMORY_START + __MEMORY_SIZE;
+
+	ret = pmb_remap(P1SEG, 0x00000000, size, PMB_C);
+	BUG_ON(ret != size);
+
+	ret = pmb_remap(P2SEG, 0x00000000, size, PMB_WT | PMB_UB);
+	BUG_ON(ret != size);
+
+	ctrl_outl(0, PMB_IRMCR);
+
+	/* PMB.SE and UB[7] */
+	ctrl_outl(PASCR_SE | (1 << 7), PMB_PASCR);
+
+	/* Flush out the TLB */
+	i = ctrl_inl(MMUCR);
+	i |= MMUCR_TI;
+	ctrl_outl(i, MMUCR);
+
 	back_to_cached();
 
 	return 0;
 }
-#endif /* CONFIG_PMB */
 
 static int pmb_seq_show(struct seq_file *file, void *iter)
 {
@@ -462,6 +479,5 @@ static int __init pmb_sysdev_init(void)
 {
 	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
 }
-
 subsys_initcall(pmb_sysdev_init);
 #endif