| author    | Matt Fleming <matt@console-pimps.org>                       | 2009-10-06 17:22:30 -0400 |
|-----------|-------------------------------------------------------------|---------------------------|
| committer | Paul Mundt <lethal@linux-sh.org>                            | 2009-10-10 08:52:03 -0400 |
| commit    | 3105121949b609964f370d42d1b90fe7fc01d6b1 (patch)            |                           |
| tree      | 20a7a6afa6d8023d20dcc7509a253268e0afdebc /arch/sh/mm/pmb.c  |                           |
| parent    | edd7de803c79c7df117bf3f0e22ffdba1b1ef256 (diff)             |                           |
sh: Remap physical memory into P1 and P2 in pmb_init()
Eventually we'll have complete control over what physical memory gets
mapped where, and we can probably do other interesting things. For now,
though, when the MMU is in 32-bit mode, we map physical memory into the
P1 and P2 virtual address ranges with the same semantics as they have
in 29-bit mode.
Signed-off-by: Matt Fleming <matt@console-pimps.org>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
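The P1/P2 semantics being preserved are a fixed-offset mapping of the low
physical address space: a physical address paddr is visible cached at
P1SEG + paddr and uncached at P2SEG + paddr. A minimal sketch of that
arithmetic, using the standard SH segment base values; the helper names
are illustrative, not kernel API:

#include <stdio.h>

#define P1SEG 0x80000000UL      /* cached window onto physical memory */
#define P2SEG 0xa0000000UL      /* uncached window onto physical memory */

/* Illustrative helpers (not kernel API) for the fixed 29-bit mapping. */
static unsigned long phys_to_p1(unsigned long paddr)
{
        return P1SEG + paddr;   /* cached view of paddr */
}

static unsigned long phys_to_p2(unsigned long paddr)
{
        return P2SEG + paddr;   /* uncached view of the same paddr */
}

int main(void)
{
        unsigned long paddr = 0x0c000000UL;     /* example physical address */

        printf("phys 0x%08lx -> P1 0x%08lx, P2 0x%08lx\n",
               paddr, phys_to_p1(paddr), phys_to_p2(paddr));
        return 0;
}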
Diffstat (limited to 'arch/sh/mm/pmb.c')

-rw-r--r--  arch/sh/mm/pmb.c | 54 +++++++++++++++---------------------------------------
1 file changed, 15 insertions(+), 39 deletions(-)
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index baf365fcdb4a..2d009bdcf901 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -38,26 +38,6 @@ static void __pmb_unmap(struct pmb_entry *);
 static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
 static unsigned long pmb_map;
 
-static struct pmb_entry pmb_init_map[] = {
-        /* vpn         ppn          flags (ub/sz/c/wt) */
-
-        /* P1 Section Mappings */
-        { 0x80000000, 0x00000000, PMB_SZ_64M  | PMB_C, },
-        { 0x84000000, 0x04000000, PMB_SZ_64M  | PMB_C, },
-        { 0x88000000, 0x08000000, PMB_SZ_128M | PMB_C, },
-        { 0x90000000, 0x10000000, PMB_SZ_64M  | PMB_C, },
-        { 0x94000000, 0x14000000, PMB_SZ_64M  | PMB_C, },
-        { 0x98000000, 0x18000000, PMB_SZ_64M  | PMB_C, },
-
-        /* P2 Section Mappings */
-        { 0xa0000000, 0x00000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
-        { 0xa4000000, 0x04000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
-        { 0xa8000000, 0x08000000, PMB_UB | PMB_SZ_128M | PMB_WT, },
-        { 0xb0000000, 0x10000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
-        { 0xb4000000, 0x14000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
-        { 0xb8000000, 0x18000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
-};
-
 static inline unsigned long mk_pmb_entry(unsigned int entry)
 {
         return (entry & PMB_E_MASK) << PMB_E_SHIFT;
@@ -156,13 +136,7 @@ static void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe)
         unsigned int entry = pmbe->entry;
         unsigned long addr;
 
-        /*
-         * Don't allow clearing of wired init entries, P1 or P2 access
-         * without a corresponding mapping in the PMB will lead to reset
-         * by the TLB.
-         */
-        if (unlikely(entry < ARRAY_SIZE(pmb_init_map) ||
-                     entry >= NR_PMB_ENTRIES))
+        if (unlikely(entry >= NR_PMB_ENTRIES))
                 return;
 
         jump_to_uncached();
@@ -300,28 +274,30 @@ static void __pmb_unmap(struct pmb_entry *pmbe)
 
 int __uses_jump_to_uncached pmb_init(void)
 {
-        unsigned int nr_entries = ARRAY_SIZE(pmb_init_map);
-        unsigned int entry, i;
-
-        BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES));
+        unsigned int i;
+        long size;
 
         jump_to_uncached();
 
         /*
-         * Ordering is important, P2 must be mapped in the PMB before we
-         * can set PMB.SE, and P1 must be mapped before we jump back to
-         * P1 space.
+         * Insert PMB entries for the P1 and P2 areas so that, after
+         * we've switched the MMU to 32-bit mode, the semantics of P1
+         * and P2 are the same as in 29-bit mode, e.g.
+         *
+         *        P1 - provides a cached window onto physical memory
+         *        P2 - provides an uncached window onto physical memory
          */
-        for (entry = 0; entry < nr_entries; entry++) {
-                struct pmb_entry *pmbe = pmb_init_map + entry;
+        size = pmb_remap(P2SEG, __MEMORY_START, __MEMORY_SIZE,
+                         PMB_WT | PMB_UB);
+        BUG_ON(size != __MEMORY_SIZE);
 
-                __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, entry);
-        }
+        size = pmb_remap(P1SEG, __MEMORY_START, __MEMORY_SIZE, PMB_C);
+        BUG_ON(size != __MEMORY_SIZE);
 
         ctrl_outl(0, PMB_IRMCR);
 
         /* PMB.SE and UB[7] */
-        ctrl_outl((1 << 31) | (1 << 7), PMB_PASCR);
+        ctrl_outl(PASCR_SE | (1 << 7), PMB_PASCR);
 
         /* Flush out the TLB */
         i = ctrl_inl(MMUCR);
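With the static pmb_init_map[] table gone, pmb_init() covers RAM through
pmb_remap(), which has to carve the requested region into the fixed PMB
entry sizes (16MB, 64MB, 128MB and 512MB on SH-4A). Below is a simplified,
self-contained sketch of that largest-first carving; the function shape,
names, and the alignment checks are assumptions of this sketch, not the
kernel's exact pmb_remap() implementation:

#include <stdio.h>

/* The four fixed SH-4A PMB entry sizes, tried largest first (sketch). */
static const unsigned long pmb_sizes[] = {
        512UL << 20, 128UL << 20, 64UL << 20, 16UL << 20,
};
#define NR_SIZES (sizeof(pmb_sizes) / sizeof(pmb_sizes[0]))

/*
 * Cover [phys, phys + size) with aligned, fixed-size entries, biggest
 * first.  Returns the number of bytes actually covered.
 */
static long remap_sketch(unsigned long virt, unsigned long phys,
                         unsigned long size)
{
        long mapped = 0;

        while (size) {
                unsigned int i;

                /* Pick the largest entry size that fits and is aligned. */
                for (i = 0; i < NR_SIZES; i++)
                        if (pmb_sizes[i] <= size &&
                            virt % pmb_sizes[i] == 0 &&
                            phys % pmb_sizes[i] == 0)
                                break;
                if (i == NR_SIZES)
                        break;  /* no entry size fits: stop short */

                printf("entry: virt 0x%08lx -> phys 0x%08lx, %4luMB\n",
                       virt, phys, pmb_sizes[i] >> 20);
                virt   += pmb_sizes[i];
                phys   += pmb_sizes[i];
                mapped += pmb_sizes[i];
                size   -= pmb_sizes[i];
        }
        return mapped;
}

int main(void)
{
        /* e.g. the old table's 0x88000000 -> 0x08000000 128MB entry */
        long n = remap_sketch(0x88000000UL, 0x08000000UL, 128UL << 20);

        printf("covered %ldMB\n", n >> 20);
        return 0;
}

One more detail worth noting: the PMB_PASCR write now uses the named
PASCR_SE constant (presumably defined in the SH headers) rather than the
magic (1 << 31), which documents the bit being set and avoids shifting
into the sign bit of a signed int.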