-rw-r--r--  arch/sh/include/asm/io.h   4
-rw-r--r--  arch/sh/mm/consistent.c    2
-rw-r--r--  arch/sh/mm/pmb.c          54
3 files changed, 18 insertions, 42 deletions
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 5be45ea4dfec..0cf2a5708e26 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -246,7 +246,7 @@ void __iounmap(void __iomem *addr);
 static inline void __iomem *
 __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
 {
-#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED)
+#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) && !defined(CONFIG_PMB)
 	unsigned long last_addr = offset + size - 1;
 #endif
 	void __iomem *ret;
@@ -255,7 +255,7 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
 	if (ret)
 		return ret;
 
-#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED)
+#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) && !defined(CONFIG_PMB)
 	/*
 	 * For P1 and P2 space this is trivial, as everything is already
 	 * mapped. Uncached access for P1 addresses are done through P2.
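
The short-circuit being disabled here relies on the legacy 29-bit layout, where P1 and P2 are fixed windows that alias physical memory with no TLB or PMB involvement; once CONFIG_PMB manages those windows, the shortcut is no longer safe. As a reminder of that layout (constants as in arch/sh/include/asm/addrspace.h; the per-segment summary is a sketch, not the authoritative definition):

/*
 * SH 29-bit physical mode splits the 4GB virtual space into fixed
 * segments:
 *
 *   P0/U0  0x00000000-0x7fffffff   TLB-mapped (user)
 *   P1     0x80000000-0x9fffffff   maps phys 0x00000000, cached
 *   P2     0xa0000000-0xbfffffff   maps phys 0x00000000, uncached
 *   P3     0xc0000000-0xdfffffff   TLB-mapped (kernel)
 *   P4     0xe0000000-0xffffffff   control space
 */
#define P1SEG		0x80000000
#define P2SEG		0xa0000000
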
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index e098ec158ddb..9a8403d9344b 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -85,7 +85,7 @@ EXPORT_SYMBOL(dma_free_coherent);
 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		    enum dma_data_direction direction)
 {
-#ifdef CONFIG_CPU_SH5
+#if defined(CONFIG_CPU_SH5) || defined(CONFIG_PMB)
 	void *p1addr = vaddr;
 #else
 	void *p1addr = (void*) P1SEGADDR((unsigned long)vaddr);
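
The consistent.c change follows directly: P1SEGADDR() assumes a kernel virtual address is a 29-bit alias of its physical address, masking off the segment bits and re-basing the result into P1. Its definition, quoted for context from arch/sh/include/asm/addrspace.h of this era (not part of the patch):

#define P1SEGADDR(a)	((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P1SEG))

Under a PMB-managed layout that mask-and-rebase can land on the wrong physical page, so when CONFIG_PMB is set the vaddr passed to dma_cache_sync() is used unmodified, just as on SH-5.
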
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index baf365fcdb4a..2d009bdcf901 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -38,26 +38,6 @@ static void __pmb_unmap(struct pmb_entry *);
 static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
 static unsigned long pmb_map;
 
-static struct pmb_entry pmb_init_map[] = {
-	/* vpn         ppn         flags (ub/sz/c/wt) */
-
-	/* P1 Section Mappings */
-	{ 0x80000000, 0x00000000, PMB_SZ_64M  | PMB_C, },
-	{ 0x84000000, 0x04000000, PMB_SZ_64M  | PMB_C, },
-	{ 0x88000000, 0x08000000, PMB_SZ_128M | PMB_C, },
-	{ 0x90000000, 0x10000000, PMB_SZ_64M  | PMB_C, },
-	{ 0x94000000, 0x14000000, PMB_SZ_64M  | PMB_C, },
-	{ 0x98000000, 0x18000000, PMB_SZ_64M  | PMB_C, },
-
-	/* P2 Section Mappings */
-	{ 0xa0000000, 0x00000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
-	{ 0xa4000000, 0x04000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
-	{ 0xa8000000, 0x08000000, PMB_UB | PMB_SZ_128M | PMB_WT, },
-	{ 0xb0000000, 0x10000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
-	{ 0xb4000000, 0x14000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
-	{ 0xb8000000, 0x18000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
-};
-
 static inline unsigned long mk_pmb_entry(unsigned int entry)
 {
 	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
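
Each row of the deleted table wired one 29-bit identity mapping into the PMB at boot. Reading one entry as an example: { 0x88000000, 0x08000000, PMB_SZ_128M | PMB_C, } maps the 128MB of virtual space at 0x88000000 onto physical 0x08000000, cached; the P2 rows cover the same physical range uncached and write-through. The rewritten pmb_init() further down builds equivalent mappings dynamically through pmb_remap() instead of hard-coding them.
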
@@ -156,13 +136,7 @@ static void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe)
 	unsigned int entry = pmbe->entry;
 	unsigned long addr;
 
-	/*
-	 * Don't allow clearing of wired init entries, P1 or P2 access
-	 * without a corresponding mapping in the PMB will lead to reset
-	 * by the TLB.
-	 */
-	if (unlikely(entry < ARRAY_SIZE(pmb_init_map) ||
-		     entry >= NR_PMB_ENTRIES))
+	if (unlikely(entry >= NR_PMB_ENTRIES))
 		return;
 
 	jump_to_uncached();
@@ -300,28 +274,30 @@ static void __pmb_unmap(struct pmb_entry *pmbe)
 
 int __uses_jump_to_uncached pmb_init(void)
 {
-	unsigned int nr_entries = ARRAY_SIZE(pmb_init_map);
-	unsigned int entry, i;
-
-	BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES));
+	unsigned int i;
+	long size;
 
 	jump_to_uncached();
 
 	/*
-	 * Ordering is important, P2 must be mapped in the PMB before we
-	 * can set PMB.SE, and P1 must be mapped before we jump back to
-	 * P1 space.
+	 * Insert PMB entries for the P1 and P2 areas so that, after
+	 * we've switched the MMU to 32-bit mode, the semantics of P1
+	 * and P2 are the same as in 29-bit mode, e.g.
+	 *
+	 *	P1 - provides a cached window onto physical memory
+	 *	P2 - provides an uncached window onto physical memory
 	 */
-	for (entry = 0; entry < nr_entries; entry++) {
-		struct pmb_entry *pmbe = pmb_init_map + entry;
+	size = pmb_remap(P2SEG, __MEMORY_START, __MEMORY_SIZE,
+			 PMB_WT | PMB_UB);
+	BUG_ON(size != __MEMORY_SIZE);
 
-		__set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, entry);
-	}
+	size = pmb_remap(P1SEG, __MEMORY_START, __MEMORY_SIZE, PMB_C);
+	BUG_ON(size != __MEMORY_SIZE);
 
 	ctrl_outl(0, PMB_IRMCR);
 
 	/* PMB.SE and UB[7] */
-	ctrl_outl((1 << 31) | (1 << 7), PMB_PASCR);
+	ctrl_outl(PASCR_SE | (1 << 7), PMB_PASCR);
 
 	/* Flush out the TLB */
 	i = ctrl_inl(MMUCR);
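
Two details in the rewritten pmb_init() are worth spelling out. First, the ordering constraint from the deleted comment still holds implicitly: the P2 (uncached) window is populated before the P1 (cached) one, and both before PMB.SE is enabled, so the uncached execution path and the return to cached space always have a live mapping underneath them. Second, PASCR_SE replaces a magic number; from the line it supersedes it must expand to bit 31, along the lines of the following (inferred, since the defining header is not part of this diff):

#define PASCR_SE	0x80000000	/* PMB.SE: switch to 32-bit space-extension mode */
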