 arch/sh/include/asm/mmu.h |  2 +-
 arch/sh/mm/pmb.c          | 59 +++++++++++++++++++++++++++++------------------------------
 2 files changed, 30 insertions(+), 31 deletions(-)
diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h
index 44c904341414..5453169bf052 100644
--- a/arch/sh/include/asm/mmu.h
+++ b/arch/sh/include/asm/mmu.h
@@ -59,6 +59,7 @@ struct pmb_entry {
 	unsigned long vpn;
 	unsigned long ppn;
 	unsigned long flags;
+	unsigned long size;
 
 	/*
 	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
@@ -66,7 +67,6 @@ struct pmb_entry {
 	 */
 	int entry;
 
-	struct pmb_entry *next;
 	/* Adjacent entry link for contiguous multi-entry mappings */
 	struct pmb_entry *link;
 };
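
With the unused next pointer gone, struct pmb_entry carries a per-entry size alongside the existing link pointer that chains adjacent entries. A minimal sketch of how the two fields cooperate, assuming only the struct above (pmb_mapping_size() is a hypothetical helper, not part of this commit):

/*
 * Walk a chain of linked PMB entries and sum each entry's recorded
 * size, yielding the total span of a contiguous multi-entry mapping.
 * Hypothetical illustration only.
 */
static unsigned long pmb_mapping_size(struct pmb_entry *pmbe)
{
	unsigned long total = 0;

	for (; pmbe; pmbe = pmbe->link)
		total += pmbe->size;

	return total;
}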
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 924f3e4b3a82..f2ad6e374b64 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -90,20 +90,15 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
 	pmbe->ppn = ppn;
 	pmbe->flags = flags;
 	pmbe->entry = pos;
+	pmbe->size = 0;
 
 	return pmbe;
 }
 
 static void pmb_free(struct pmb_entry *pmbe)
 {
-	int pos = pmbe->entry;
-
-	pmbe->vpn = 0;
-	pmbe->ppn = 0;
-	pmbe->flags = 0;
-	pmbe->entry = 0;
-
-	clear_bit(pos, pmb_map);
+	clear_bit(pmbe->entry, pmb_map);
+	pmbe->entry = PMB_NO_ENTRY;
 }
 
 /*
@@ -198,6 +193,8 @@ again:
 		vaddr += pmb_sizes[i].size;
 		size -= pmb_sizes[i].size;
 
+		pmbe->size = pmb_sizes[i].size;
+
 		/*
 		 * Link adjacent entries that span multiple PMB entries
 		 * for easier tear-down.
@@ -273,25 +270,7 @@ static void pmb_unmap_entry(struct pmb_entry *pmbe)
 	} while (pmbe);
 }
 
-static inline void
-pmb_log_mapping(unsigned long data_val, unsigned long vpn, unsigned long ppn)
-{
-	unsigned int size;
-	const char *sz_str;
-
-	size = data_val & PMB_SZ_MASK;
-
-	sz_str = (size == PMB_SZ_16M)  ? " 16MB":
-		 (size == PMB_SZ_64M)  ? " 64MB":
-		 (size == PMB_SZ_128M) ? "128MB":
-					 "512MB";
-
-	pr_info("\t0x%08lx -> 0x%08lx [ %s %scached ]\n",
-		vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, sz_str,
-		(data_val & PMB_C) ? "" : "un");
-}
-
-static inline unsigned int pmb_ppn_in_range(unsigned long ppn)
+static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
 {
 	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
 }
@@ -299,7 +278,8 @@ static inline unsigned int pmb_ppn_in_range(unsigned long ppn)
 static int pmb_synchronize_mappings(void)
 {
 	unsigned int applied = 0;
-	int i;
+	struct pmb_entry *pmbp = NULL;
+	int i, j;
 
 	pr_info("PMB: boot mappings:\n");
 
@@ -323,6 +303,7 @@ static int pmb_synchronize_mappings(void)
 		unsigned long addr, data;
 		unsigned long addr_val, data_val;
 		unsigned long ppn, vpn, flags;
+		unsigned int size;
 		struct pmb_entry *pmbe;
 
 		addr = mk_pmb_addr(i);
@@ -366,7 +347,8 @@ static int pmb_synchronize_mappings(void)
 			__raw_writel(data_val, data);
 		}
 
-		flags = data_val & (PMB_SZ_MASK | PMB_CACHE_MASK);
+		size = data_val & PMB_SZ_MASK;
+		flags = size | (data_val & PMB_CACHE_MASK);
 
 		pmbe = pmb_alloc(vpn, ppn, flags, i);
 		if (IS_ERR(pmbe)) {
@@ -374,7 +356,24 @@ static int pmb_synchronize_mappings(void)
 			continue;
 		}
 
-		pmb_log_mapping(data_val, vpn, ppn);
+		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
+			if (pmb_sizes[j].flag == size)
+				pmbe->size = pmb_sizes[j].size;
+
+		/*
+		 * Compare the previous entry against the current one to
+		 * see if the entries span a contiguous mapping. If so,
+		 * setup the entry links accordingly.
+		 */
+		if (pmbp && ((pmbe->vpn == (pmbp->vpn + pmbp->size)) &&
+			     (pmbe->ppn == (pmbp->ppn + pmbp->size))))
+			pmbp->link = pmbe;
+
+		pmbp = pmbe;
+
+		pr_info("\t0x%08lx -> 0x%08lx [ %ldMB %scached ]\n",
+			vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, pmbe->size >> 20,
+			(data_val & PMB_C) ? "" : "un");
 
 		applied++;
 	}
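
The synchronization path now records each boot mapping's size and links runs of adjacent entries the same way pmb_remap() does. The contiguity check itself, as a standalone sketch against the struct above (pmb_entries_adjacent() is a hypothetical name; the commit open-codes the comparison inline):

/*
 * Two PMB entries can be chained only when both the virtual and the
 * physical range of the second start exactly where the first one
 * ends. Hypothetical illustration, not commit code.
 */
static int pmb_entries_adjacent(struct pmb_entry *prev,
				struct pmb_entry *next)
{
	return prev && next &&
	       next->vpn == prev->vpn + prev->size &&
	       next->ppn == prev->ppn + prev->size;
}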