about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorPaul Mundt <lethal@linux-sh.org>2010-01-13 04:31:48 -0500
committerPaul Mundt <lethal@linux-sh.org>2010-01-13 04:31:48 -0500
commita0ab36689a36e583b6e736f1c99ac8c9aebdad59 (patch)
tree3efc6236d7ec0b5dbeeb311b8cd923c8e1e7f88c /arch
parent7f33306ee57bce9c79825e89c457a91025aa5aad (diff)
sh: fixed PMB mode refactoring.
This introduces some much overdue chainsawing of the fixed PMB support. fixed PMB was introduced initially to work around the fact that dynamic PMB mode was relatively broken, though they were never intended to converge. The main areas where there are differences are whether the system is booted in 29-bit mode or 32-bit mode, and whether legacy mappings are to be preserved. Any system booting in true 32-bit mode will not care about legacy mappings, so these are roughly decoupled. Regardless of the entry point, PMB and 32BIT are directly related as far as the kernel is concerned, so we also switch back to having one select the other. With legacy mappings iterated through and applied in the initialization path it's now possible to finally merge the two implementations and permit dynamic remapping overtop of remaining entries regardless of whether boot mappings are crafted by hand or inherited from the boot loader. Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch')
-rw-r--r--arch/sh/boot/Makefile9
-rw-r--r--arch/sh/include/asm/addrspace.h4
-rw-r--r--arch/sh/include/asm/io.h27
-rw-r--r--arch/sh/include/asm/mmu.h18
-rw-r--r--arch/sh/include/asm/page.h2
-rw-r--r--arch/sh/kernel/head_32.S4
-rw-r--r--arch/sh/kernel/setup.c3
-rw-r--r--arch/sh/kernel/vmlinux.lds.S15
-rw-r--r--arch/sh/mm/Kconfig24
-rw-r--r--arch/sh/mm/Makefile2
-rw-r--r--arch/sh/mm/pmb.c106
11 files changed, 117 insertions, 97 deletions
diff --git a/arch/sh/boot/Makefile b/arch/sh/boot/Makefile
index 743ce0c8d98d..1ce63624c9b9 100644
--- a/arch/sh/boot/Makefile
+++ b/arch/sh/boot/Makefile
@@ -46,15 +46,8 @@ $(obj)/romImage: $(obj)/romimage/vmlinux FORCE
46$(obj)/romimage/vmlinux: $(obj)/zImage FORCE 46$(obj)/romimage/vmlinux: $(obj)/zImage FORCE
47 $(Q)$(MAKE) $(build)=$(obj)/romimage $@ 47 $(Q)$(MAKE) $(build)=$(obj)/romimage $@
48 48
49KERNEL_MEMORY := 0x00000000 49KERNEL_MEMORY := $(shell /bin/bash -c 'printf "0x%08x" \
50ifeq ($(CONFIG_PMB_FIXED),y)
51KERNEL_MEMORY := $(shell /bin/bash -c 'printf "0x%08x" \
52 $$[$(CONFIG_MEMORY_START) & 0x1fffffff]') 50 $$[$(CONFIG_MEMORY_START) & 0x1fffffff]')
53endif
54ifeq ($(CONFIG_29BIT),y)
55KERNEL_MEMORY := $(shell /bin/bash -c 'printf "0x%08x" \
56 $$[$(CONFIG_MEMORY_START)]')
57endif
58 51
59KERNEL_LOAD := $(shell /bin/bash -c 'printf "0x%08x" \ 52KERNEL_LOAD := $(shell /bin/bash -c 'printf "0x%08x" \
60 $$[$(CONFIG_PAGE_OFFSET) + \ 53 $$[$(CONFIG_PAGE_OFFSET) + \
diff --git a/arch/sh/include/asm/addrspace.h b/arch/sh/include/asm/addrspace.h
index 99d6b3ecbe22..bcd7d4d78f6b 100644
--- a/arch/sh/include/asm/addrspace.h
+++ b/arch/sh/include/asm/addrspace.h
@@ -28,7 +28,7 @@
28/* Returns the privileged segment base of a given address */ 28/* Returns the privileged segment base of a given address */
29#define PXSEG(a) (((unsigned long)(a)) & 0xe0000000) 29#define PXSEG(a) (((unsigned long)(a)) & 0xe0000000)
30 30
31#if defined(CONFIG_29BIT) || defined(CONFIG_PMB_FIXED) 31#ifdef CONFIG_29BIT
32/* 32/*
33 * Map an address to a certain privileged segment 33 * Map an address to a certain privileged segment
34 */ 34 */
@@ -40,7 +40,7 @@
40 ((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P3SEG)) 40 ((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P3SEG))
41#define P4SEGADDR(a) \ 41#define P4SEGADDR(a) \
42 ((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P4SEG)) 42 ((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P4SEG))
43#endif /* 29BIT || PMB_FIXED */ 43#endif /* 29BIT */
44#endif /* P1SEG */ 44#endif /* P1SEG */
45 45
46/* Check if an address can be reached in 29 bits */ 46/* Check if an address can be reached in 29 bits */
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 026dd659a640..f4314d8b05b8 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -244,18 +244,11 @@ __ioremap(unsigned long offset, unsigned long size, unsigned long flags)
244} 244}
245 245
246static inline void __iomem * 246static inline void __iomem *
247__ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags) 247__ioremap_29bit(unsigned long offset, unsigned long size, unsigned long flags)
248{ 248{
249#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) && !defined(CONFIG_PMB) 249#ifdef CONFIG_29BIT
250 unsigned long last_addr = offset + size - 1; 250 unsigned long last_addr = offset + size - 1;
251#endif
252 void __iomem *ret;
253 251
254 ret = __ioremap_trapped(offset, size);
255 if (ret)
256 return ret;
257
258#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) && !defined(CONFIG_PMB)
259 /* 252 /*
260 * For P1 and P2 space this is trivial, as everything is already 253 * For P1 and P2 space this is trivial, as everything is already
261 * mapped. Uncached access for P1 addresses are done through P2. 254 * mapped. Uncached access for P1 addresses are done through P2.
@@ -274,6 +267,22 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
274 return (void __iomem *)P4SEGADDR(offset); 267 return (void __iomem *)P4SEGADDR(offset);
275#endif 268#endif
276 269
270 return NULL;
271}
272
273static inline void __iomem *
274__ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
275{
276 void __iomem *ret;
277
278 ret = __ioremap_trapped(offset, size);
279 if (ret)
280 return ret;
281
282 ret = __ioremap_29bit(offset, size, flags);
283 if (ret)
284 return ret;
285
277 return __ioremap(offset, size, flags); 286 return __ioremap(offset, size, flags);
278} 287}
279#else 288#else
diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h
index c7426ad9926e..4b0882bf5183 100644
--- a/arch/sh/include/asm/mmu.h
+++ b/arch/sh/include/asm/mmu.h
@@ -65,11 +65,29 @@ struct pmb_entry {
65 struct pmb_entry *link; 65 struct pmb_entry *link;
66}; 66};
67 67
68#ifdef CONFIG_PMB
68/* arch/sh/mm/pmb.c */ 69/* arch/sh/mm/pmb.c */
69long pmb_remap(unsigned long virt, unsigned long phys, 70long pmb_remap(unsigned long virt, unsigned long phys,
70 unsigned long size, unsigned long flags); 71 unsigned long size, unsigned long flags);
71void pmb_unmap(unsigned long addr); 72void pmb_unmap(unsigned long addr);
72int pmb_init(void); 73int pmb_init(void);
74#else
75static inline long pmb_remap(unsigned long virt, unsigned long phys,
76 unsigned long size, unsigned long flags)
77{
78 return -EINVAL;
79}
80
81static inline void pmb_unmap(unsigned long addr)
82{
83}
84
85static inline int pmb_init(void)
86{
87 return -ENODEV;
88}
89#endif /* CONFIG_PMB */
90
73#endif /* __ASSEMBLY__ */ 91#endif /* __ASSEMBLY__ */
74 92
75#endif /* __MMU_H */ 93#endif /* __MMU_H */
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index 81bffc0d6860..a86c0f1d05d4 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -127,7 +127,7 @@ typedef struct page *pgtable_t;
127 * is not visible (it is part of the PMB mapping) and so needs to be 127 * is not visible (it is part of the PMB mapping) and so needs to be
128 * added or subtracted as required. 128 * added or subtracted as required.
129 */ 129 */
130#if defined(CONFIG_PMB_FIXED) 130#if defined(CONFIG_PMB_LEGACY)
131/* phys = virt - PAGE_OFFSET - (__MEMORY_START & 0xe0000000) */ 131/* phys = virt - PAGE_OFFSET - (__MEMORY_START & 0xe0000000) */
132#define PMB_OFFSET (PAGE_OFFSET - PXSEG(__MEMORY_START)) 132#define PMB_OFFSET (PAGE_OFFSET - PXSEG(__MEMORY_START))
133#define __pa(x) ((unsigned long)(x) - PMB_OFFSET) 133#define __pa(x) ((unsigned long)(x) - PMB_OFFSET)
diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S
index 1151ecdffa71..e5d421db4c83 100644
--- a/arch/sh/kernel/head_32.S
+++ b/arch/sh/kernel/head_32.S
@@ -13,6 +13,8 @@
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/linkage.h> 14#include <linux/linkage.h>
15#include <asm/thread_info.h> 15#include <asm/thread_info.h>
16#include <asm/mmu.h>
17#include <cpu/mmu_context.h>
16 18
17#ifdef CONFIG_CPU_SH4A 19#ifdef CONFIG_CPU_SH4A
18#define SYNCO() synco 20#define SYNCO() synco
@@ -33,7 +35,7 @@ ENTRY(empty_zero_page)
33 .long 1 /* LOADER_TYPE */ 35 .long 1 /* LOADER_TYPE */
34 .long 0x00000000 /* INITRD_START */ 36 .long 0x00000000 /* INITRD_START */
35 .long 0x00000000 /* INITRD_SIZE */ 37 .long 0x00000000 /* INITRD_SIZE */
36#if defined(CONFIG_32BIT) && defined(CONFIG_PMB_FIXED) 38#ifdef CONFIG_32BIT
37 .long 0x53453f00 + 32 /* "SE?" = 32 bit */ 39 .long 0x53453f00 + 32 /* "SE?" = 32 bit */
38#else 40#else
39 .long 0x53453f00 + 29 /* "SE?" = 29 bit */ 41 .long 0x53453f00 + 29 /* "SE?" = 29 bit */
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 8b0e69792cf4..f79ebe32a24a 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -455,10 +455,7 @@ void __init setup_arch(char **cmdline_p)
455 sh_mv.mv_setup(cmdline_p); 455 sh_mv.mv_setup(cmdline_p);
456 456
457 paging_init(); 457 paging_init();
458
459#ifdef CONFIG_PMB_ENABLE
460 pmb_init(); 458 pmb_init();
461#endif
462 459
463#ifdef CONFIG_SMP 460#ifdef CONFIG_SMP
464 plat_smp_setup(); 461 plat_smp_setup();
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index a1e4ec24f1f5..9e5a5878eeae 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -14,17 +14,16 @@ OUTPUT_ARCH(sh)
14#include <asm/cache.h> 14#include <asm/cache.h>
15#include <asm/vmlinux.lds.h> 15#include <asm/vmlinux.lds.h>
16 16
17#if defined(CONFIG_32BIT) && !defined(CONFIG_PMB_LEGACY)
18#define MEMORY_OFFSET 0
19#else
20#define MEMORY_OFFSET (CONFIG_MEMORY_START & 0x1fffffff)
21#endif
22
17ENTRY(_start) 23ENTRY(_start)
18SECTIONS 24SECTIONS
19{ 25{
20#ifdef CONFIG_PMB_FIXED 26 . = CONFIG_PAGE_OFFSET + MEMORY_OFFSET + CONFIG_ZERO_PAGE_OFFSET;
21 . = CONFIG_PAGE_OFFSET + (CONFIG_MEMORY_START & 0x1fffffff) +
22 CONFIG_ZERO_PAGE_OFFSET;
23#elif defined(CONFIG_32BIT)
24 . = CONFIG_PAGE_OFFSET + CONFIG_ZERO_PAGE_OFFSET;
25#else
26 . = CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START + CONFIG_ZERO_PAGE_OFFSET;
27#endif
28 27
29 _text = .; /* Text and read-only data */ 28 _text = .; /* Text and read-only data */
30 29
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 358c860aeb9b..860cd24b4205 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -80,30 +80,18 @@ config 32BIT
80 bool 80 bool
81 default y if CPU_SH5 81 default y if CPU_SH5
82 82
83config PMB_ENABLE
84 bool "Support 32-bit physical addressing through PMB"
85 depends on MMU && EXPERIMENTAL && CPU_SH4A && !CPU_SH4AL_DSP
86 help
87 If you say Y here, physical addressing will be extended to
88 32-bits through the SH-4A PMB. If this is not set, legacy
89 29-bit physical addressing will be used.
90
91choice
92 prompt "PMB handling type"
93 depends on PMB_ENABLE
94 default PMB_FIXED
95
96config PMB 83config PMB
97 bool "PMB" 84 bool "Support 32-bit physical addressing through PMB"
98 depends on MMU && EXPERIMENTAL && CPU_SH4A && !CPU_SH4AL_DSP 85 depends on MMU && EXPERIMENTAL && CPU_SH4A && !CPU_SH4AL_DSP
86 select 32BIT
99 help 87 help
100 If you say Y here, physical addressing will be extended to 88 If you say Y here, physical addressing will be extended to
101 32-bits through the SH-4A PMB. If this is not set, legacy 89 32-bits through the SH-4A PMB. If this is not set, legacy
102 29-bit physical addressing will be used. 90 29-bit physical addressing will be used.
103 91
104config PMB_FIXED 92config PMB_LEGACY
105 bool "fixed PMB" 93 bool "Support legacy boot mappings for PMB"
106 depends on MMU && EXPERIMENTAL && CPU_SH4A && !CPU_SH4AL_DSP 94 depends on PMB
107 select 32BIT 95 select 32BIT
108 help 96 help
109 If this option is enabled, fixed PMB mappings are inherited 97 If this option is enabled, fixed PMB mappings are inherited
@@ -111,8 +99,6 @@ config PMB_FIXED
111 management. This is the closest to legacy 29-bit physical mode, 99 management. This is the closest to legacy 29-bit physical mode,
112 and allows systems to support up to 512MiB of system memory. 100 and allows systems to support up to 512MiB of system memory.
113 101
114endchoice
115
116config X2TLB 102config X2TLB
117 bool "Enable extended TLB mode" 103 bool "Enable extended TLB mode"
118 depends on (CPU_SHX2 || CPU_SHX3) && MMU && EXPERIMENTAL 104 depends on (CPU_SHX2 || CPU_SHX3) && MMU && EXPERIMENTAL
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index 9fa11d655044..edde8bdd681d 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -33,7 +33,7 @@ obj-y += $(tlb-y)
33endif 33endif
34 34
35obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o 35obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
36obj-$(CONFIG_PMB_ENABLE) += pmb.o 36obj-$(CONFIG_PMB) += pmb.o
37obj-$(CONFIG_NUMA) += numa.o 37obj-$(CONFIG_NUMA) += numa.o
38 38
39# Special flags for fault_64.o. This puts restrictions on the number of 39# Special flags for fault_64.o. This puts restrictions on the number of
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 280f6a166035..8f7dbf183fb0 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Privileged Space Mapping Buffer (PMB) Support. 4 * Privileged Space Mapping Buffer (PMB) Support.
5 * 5 *
6 * Copyright (C) 2005, 2006, 2007 Paul Mundt 6 * Copyright (C) 2005 - 2010 Paul Mundt
7 * 7 *
8 * P1/P2 Section mapping definitions from map32.h, which was: 8 * P1/P2 Section mapping definitions from map32.h, which was:
9 * 9 *
@@ -279,51 +279,12 @@ static void __pmb_unmap(struct pmb_entry *pmbe)
279 } while (pmbe); 279 } while (pmbe);
280} 280}
281 281
282#ifdef CONFIG_PMB 282#ifdef CONFIG_PMB_LEGACY
283int __uses_jump_to_uncached pmb_init(void) 283static int pmb_apply_legacy_mappings(void)
284{
285 unsigned int i;
286 long size, ret;
287
288 jump_to_uncached();
289
290 /*
291 * Insert PMB entries for the P1 and P2 areas so that, after
292 * we've switched the MMU to 32-bit mode, the semantics of P1
293 * and P2 are the same as in 29-bit mode, e.g.
294 *
295 * P1 - provides a cached window onto physical memory
296 * P2 - provides an uncached window onto physical memory
297 */
298 size = __MEMORY_START + __MEMORY_SIZE;
299
300 ret = pmb_remap(P1SEG, 0x00000000, size, PMB_C);
301 BUG_ON(ret != size);
302
303 ret = pmb_remap(P2SEG, 0x00000000, size, PMB_WT | PMB_UB);
304 BUG_ON(ret != size);
305
306 ctrl_outl(0, PMB_IRMCR);
307
308 /* PMB.SE and UB[7] */
309 ctrl_outl(PASCR_SE | (1 << 7), PMB_PASCR);
310
311 /* Flush out the TLB */
312 i = ctrl_inl(MMUCR);
313 i |= MMUCR_TI;
314 ctrl_outl(i, MMUCR);
315
316 back_to_cached();
317
318 return 0;
319}
320#else
321int __uses_jump_to_uncached pmb_init(void)
322{ 284{
323 int i; 285 int i;
324 unsigned long addr, data; 286 unsigned long addr, data;
325 287 unsigned int applied = 0;
326 jump_to_uncached();
327 288
328 for (i = 0; i < PMB_ENTRY_MAX; i++) { 289 for (i = 0; i < PMB_ENTRY_MAX; i++) {
329 struct pmb_entry *pmbe; 290 struct pmb_entry *pmbe;
@@ -357,13 +318,69 @@ int __uses_jump_to_uncached pmb_init(void)
357 318
358 pmbe = pmb_alloc(vpn, ppn, flags, i); 319 pmbe = pmb_alloc(vpn, ppn, flags, i);
359 WARN_ON(IS_ERR(pmbe)); 320 WARN_ON(IS_ERR(pmbe));
321
322 applied++;
323 }
324
325 return (applied == 0);
326}
327#else
328static inline int pmb_apply_legacy_mappings(void)
329{
330 return 1;
331}
332#endif
333
334int __uses_jump_to_uncached pmb_init(void)
335{
336 unsigned int i;
337 unsigned long size, ret;
338
339 jump_to_uncached();
340
341 /*
342 * Attempt to apply the legacy boot mappings if configured. If
343 * this is successful then we simply carry on with those and
344 * don't bother establishing additional memory mappings. Dynamic
345 * device mappings through pmb_remap() can still be bolted on
346 * after this.
347 */
348 ret = pmb_apply_legacy_mappings();
349 if (ret == 0) {
350 back_to_cached();
351 return 0;
360 } 352 }
361 353
354 /*
355 * Insert PMB entries for the P1 and P2 areas so that, after
356 * we've switched the MMU to 32-bit mode, the semantics of P1
357 * and P2 are the same as in 29-bit mode, e.g.
358 *
359 * P1 - provides a cached window onto physical memory
360 * P2 - provides an uncached window onto physical memory
361 */
362 size = (unsigned long)__MEMORY_START + __MEMORY_SIZE;
363
364 ret = pmb_remap(P1SEG, 0x00000000, size, PMB_C);
365 BUG_ON(ret != size);
366
367 ret = pmb_remap(P2SEG, 0x00000000, size, PMB_WT | PMB_UB);
368 BUG_ON(ret != size);
369
370 ctrl_outl(0, PMB_IRMCR);
371
372 /* PMB.SE and UB[7] */
373 ctrl_outl(PASCR_SE | (1 << 7), PMB_PASCR);
374
375 /* Flush out the TLB */
376 i = ctrl_inl(MMUCR);
377 i |= MMUCR_TI;
378 ctrl_outl(i, MMUCR);
379
362 back_to_cached(); 380 back_to_cached();
363 381
364 return 0; 382 return 0;
365} 383}
366#endif /* CONFIG_PMB */
367 384
368static int pmb_seq_show(struct seq_file *file, void *iter) 385static int pmb_seq_show(struct seq_file *file, void *iter)
369{ 386{
@@ -462,6 +479,5 @@ static int __init pmb_sysdev_init(void)
462{ 479{
463 return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver); 480 return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
464} 481}
465
466subsys_initcall(pmb_sysdev_init); 482subsys_initcall(pmb_sysdev_init);
467#endif 483#endif