Diffstat (limited to 'arch/sh/mm/pmb.c')
 arch/sh/mm/pmb.c | 243 +++++++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 207 insertions(+), 36 deletions(-)
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index b9d5476e1284..198bcff5e96f 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -52,7 +52,7 @@ struct pmb_entry {
 	struct pmb_entry *link;
 };
 
-static void pmb_unmap_entry(struct pmb_entry *);
+static void pmb_unmap_entry(struct pmb_entry *, int depth);
 
 static DEFINE_RWLOCK(pmb_rwlock);
 static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
@@ -115,13 +115,14 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
 
 	pmbe = &pmb_entry_list[pos];
 
+	memset(pmbe, 0, sizeof(struct pmb_entry));
+
 	spin_lock_init(&pmbe->lock);
 
 	pmbe->vpn = vpn;
 	pmbe->ppn = ppn;
 	pmbe->flags = flags;
 	pmbe->entry = pos;
-	pmbe->size = 0;
 
 	return pmbe;
 
@@ -133,7 +134,9 @@ out:
 static void pmb_free(struct pmb_entry *pmbe)
 {
 	__clear_bit(pmbe->entry, pmb_map);
-	pmbe->entry = PMB_NO_ENTRY;
+
+	pmbe->entry = PMB_NO_ENTRY;
+	pmbe->link = NULL;
 }
 
 /*
@@ -161,9 +164,6 @@ static __always_inline unsigned long pmb_cache_flags(void)
  */
 static void __set_pmb_entry(struct pmb_entry *pmbe)
 {
-	pmbe->flags &= ~PMB_CACHE_MASK;
-	pmbe->flags |= pmb_cache_flags();
-
 	writel_uncached(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
 	writel_uncached(pmbe->ppn | pmbe->flags | PMB_V,
 			mk_pmb_data(pmbe->entry));
@@ -280,7 +280,7 @@ again:
 	return wanted - size;
 
 out:
-	pmb_unmap_entry(pmbp);
+	pmb_unmap_entry(pmbp, NR_PMB_ENTRIES);
 
 	return err;
 }
@@ -302,18 +302,40 @@ void pmb_unmap(unsigned long addr)
 
 	read_unlock(&pmb_rwlock);
 
-	pmb_unmap_entry(pmbe);
+	pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
 }
 
-static void pmb_unmap_entry(struct pmb_entry *pmbe)
+static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
 {
-	unsigned long flags;
+	return (b->vpn == (a->vpn + a->size)) &&
+	       (b->ppn == (a->ppn + a->size)) &&
+	       (b->flags == a->flags);
+}
 
-	if (unlikely(!pmbe))
-		return;
+static bool pmb_size_valid(unsigned long size)
+{
+	int i;
 
-	write_lock_irqsave(&pmb_rwlock, flags);
+	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+		if (pmb_sizes[i].size == size)
+			return true;
+
+	return false;
+}
+
+static int pmb_size_to_flags(unsigned long size)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+		if (pmb_sizes[i].size == size)
+			return pmb_sizes[i].flag;
 
+	return 0;
+}
+
+static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
+{
 	do {
 		struct pmb_entry *pmblink = pmbe;
 
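The three helpers added above do the heavy lifting for the new coalescing support. pmb_can_merge() is stricter than the old open-coded contiguity check: the second entry must begin exactly where the first ends in both virtual and physical space, and the attribute bits must match, since a merged entry carries a single set of cacheability and size flags. A compilable userspace sketch of the same test, with a hypothetical stand-in type (the kernel's struct pmb_entry carries more fields than shown):

#include <stdbool.h>
#include <assert.h>

/* Hypothetical stand-in for the fields pmb_can_merge() inspects. */
struct entry {
	unsigned long vpn, ppn, size, flags;
};

static bool can_merge(const struct entry *a, const struct entry *b)
{
	return b->vpn == a->vpn + a->size &&	/* virtually contiguous */
	       b->ppn == a->ppn + a->size &&	/* physically contiguous */
	       b->flags == a->flags;		/* identical attributes */
}

int main(void)
{
	struct entry a = { 0x10000000, 0x40000000, 16 << 20, 0x1 };
	struct entry b = { 0x11000000, 0x41000000, 16 << 20, 0x1 };

	assert(can_merge(&a, &b));	/* back to back, same flags */
	b.flags = 0x0;
	assert(!can_merge(&a, &b));	/* attribute mismatch blocks the merge */
	return 0;
}

pmb_size_valid() and pmb_size_to_flags() simply consult pmb_sizes[], so a merged span is only usable if the hardware can express it as a single PMB page.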
@@ -332,8 +354,18 @@ static void pmb_unmap_entry(struct pmb_entry *pmbe)
 		pmbe = pmblink->link;
 
 		pmb_free(pmblink);
-	} while (pmbe);
+	} while (pmbe && --depth);
+}
+
+static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
+{
+	unsigned long flags;
 
+	if (unlikely(!pmbe))
+		return;
+
+	write_lock_irqsave(&pmb_rwlock, flags);
+	__pmb_unmap_entry(pmbe, depth);
 	write_unlock_irqrestore(&pmb_rwlock, flags);
 }
 
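The depth parameter bounds how far down a compound-mapping chain the teardown walks: callers such as pmb_unmap() pass NR_PMB_ENTRIES to release a whole chain, while pmb_merge() (further below) releases only the links it has folded into the head entry. Note that the locking lives in the pmb_unmap_entry() wrapper, so the merge path can call __pmb_unmap_entry() while already holding pmb_rwlock for writing. A minimal sketch of the bounded walk, with hypothetical types and a plain free() standing in for pmb_free():

#include <stdlib.h>

struct ent {
	struct ent *link;	/* next entry in the compound mapping */
};

/* Release at most 'depth' entries starting at 'e'; stops early if the
 * chain ends first -- the same shape as __pmb_unmap_entry() above. */
static void unmap_chain(struct ent *e, int depth)
{
	if (!e)
		return;

	do {
		struct ent *next = e->link;

		free(e);	/* stands in for pmb_free() */
		e = next;
	} while (e && --depth);
}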
@@ -342,14 +374,40 @@ static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
 	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
 }
 
-static int pmb_synchronize_mappings(void)
+static void __init pmb_notify(void)
 {
-	unsigned int applied = 0;
-	struct pmb_entry *pmbp = NULL;
-	int i, j;
+	int i;
 
 	pr_info("PMB: boot mappings:\n");
 
+	read_lock(&pmb_rwlock);
+
+	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
+		struct pmb_entry *pmbe;
+
+		if (!test_bit(i, pmb_map))
+			continue;
+
+		pmbe = &pmb_entry_list[i];
+
+		pr_info("       0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
+			pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
+			pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
+	}
+
+	read_unlock(&pmb_rwlock);
+}
+
+/*
+ * Sync our software copy of the PMB mappings with those in hardware. The
+ * mappings in the hardware PMB were either set up by the bootloader or
+ * very early on by the kernel.
+ */
+static void __init pmb_synchronize(void)
+{
+	struct pmb_entry *pmbp = NULL;
+	int i, j;
+
 	/*
 	 * Run through the initial boot mappings, log the established
 	 * ones, and blow away anything that falls outside of the valid
@@ -432,10 +490,10 @@ static int pmb_synchronize_mappings(void)
 		/*
 		 * Compare the previous entry against the current one to
 		 * see if the entries span a contiguous mapping. If so,
-		 * setup the entry links accordingly.
+		 * setup the entry links accordingly. Compound mappings
+		 * are later coalesced.
 		 */
-		if ((pmbe->vpn == (pmbp->vpn + pmbp->size)) &&
-		    (pmbe->ppn == (pmbp->ppn + pmbp->size)))
+		if (pmb_can_merge(pmbp, pmbe))
 			pmbp->link = pmbe;
 
 		spin_unlock(&pmbp->lock);
@@ -444,37 +502,150 @@ static int pmb_synchronize_mappings(void)
 		pmbp = pmbe;
 
 		spin_unlock_irqrestore(&pmbe->lock, irqflags);
+	}
+}
 
-		pr_info("\t0x%08lx -> 0x%08lx [ %ldMB %scached ]\n",
-			vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, pmbe->size >> 20,
-			(data_val & PMB_C) ? "" : "un");
+static void __init pmb_merge(struct pmb_entry *head)
+{
+	unsigned long span, newsize;
+	struct pmb_entry *tail;
+	int i = 1, depth = 0;
+
+	span = newsize = head->size;
 
-		applied++;
+	tail = head->link;
+	while (tail) {
+		span += tail->size;
+
+		if (pmb_size_valid(span)) {
+			newsize = span;
+			depth = i;
+		}
+
+		/* This is the end of the line.. */
+		if (!tail->link)
+			break;
+
+		tail = tail->link;
+		i++;
 	}
 
-	return (applied == 0);
+	/*
+	 * The merged page size must be valid.
+	 */
+	if (!pmb_size_valid(newsize))
+		return;
+
+	head->flags &= ~PMB_SZ_MASK;
+	head->flags |= pmb_size_to_flags(newsize);
+
+	head->size = newsize;
+
+	__pmb_unmap_entry(head->link, depth);
+	__set_pmb_entry(head);
 }
 
-int pmb_init(void)
+static void __init pmb_coalesce(void)
 {
-	int ret;
+	unsigned long flags;
+	int i;
+
+	write_lock_irqsave(&pmb_rwlock, flags);
+
+	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
+		struct pmb_entry *pmbe;
+
+		if (!test_bit(i, pmb_map))
+			continue;
+
+		pmbe = &pmb_entry_list[i];
+
+		/*
+		 * We're only interested in compound mappings
+		 */
+		if (!pmbe->link)
+			continue;
+
+		/*
+		 * Nothing to do if it already uses the largest possible
+		 * page size.
+		 */
+		if (pmbe->size == SZ_512M)
+			continue;
+
+		pmb_merge(pmbe);
+	}
+
+	write_unlock_irqrestore(&pmb_rwlock, flags);
+}
+
+#ifdef CONFIG_UNCACHED_MAPPING
+static void __init pmb_resize(void)
+{
+	int i;
 
 	/*
-	 * Sync our software copy of the PMB mappings with those in
-	 * hardware. The mappings in the hardware PMB were either set up
-	 * by the bootloader or very early on by the kernel.
+	 * If the uncached mapping was constructed by the kernel, it will
+	 * already be a reasonable size.
 	 */
-	ret = pmb_synchronize_mappings();
-	if (unlikely(ret == 0))
-		return 0;
+	if (uncached_size == SZ_16M)
+		return;
+
+	read_lock(&pmb_rwlock);
+
+	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
+		struct pmb_entry *pmbe;
+		unsigned long flags;
+
+		if (!test_bit(i, pmb_map))
+			continue;
+
+		pmbe = &pmb_entry_list[i];
+
+		if (pmbe->vpn != uncached_start)
+			continue;
+
+		/*
+		 * Found it, now resize it.
+		 */
+		spin_lock_irqsave(&pmbe->lock, flags);
+
+		pmbe->size = SZ_16M;
+		pmbe->flags &= ~PMB_SZ_MASK;
+		pmbe->flags |= pmb_size_to_flags(pmbe->size);
+
+		uncached_resize(pmbe->size);
+
+		__set_pmb_entry(pmbe);
+
+		spin_unlock_irqrestore(&pmbe->lock, flags);
+	}
+
+	read_unlock(&pmb_rwlock);
+}
+#endif
+
+void __init pmb_init(void)
+{
+	/* Synchronize software state */
+	pmb_synchronize();
+
+	/* Attempt to combine compound mappings */
+	pmb_coalesce();
+
+#ifdef CONFIG_UNCACHED_MAPPING
+	/* Resize initial mappings, if necessary */
+	pmb_resize();
+#endif
+
+	/* Log them */
+	pmb_notify();
 
 	writel_uncached(0, PMB_IRMCR);
 
 	/* Flush out the TLB */
 	__raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);
 	ctrl_barrier();
-
-	return 0;
 }
 
 bool __in_29bit_mode(void)
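The sizing walk in pmb_merge() rewards a worked example: it accumulates the span of the chain and remembers the longest prefix whose total is itself a representable PMB page size, then grows the head to that size and frees exactly that many linked entries. A self-contained sketch, assuming the valid sizes are the SH PMB page sizes (16MB, 64MB, 128MB and 512MB, mirroring pmb_sizes[] minus its flag bits):

#include <stdbool.h>
#include <stdio.h>

/* Assumed PMB page sizes, standing in for pmb_sizes[]. */
static const unsigned long valid_sizes[] = {
	16 << 20, 64 << 20, 128 << 20, 512 << 20,
};

static bool size_valid(unsigned long size)
{
	unsigned int i;

	for (i = 0; i < sizeof(valid_sizes) / sizeof(valid_sizes[0]); i++)
		if (valid_sizes[i] == size)
			return true;

	return false;
}

int main(void)
{
	/* A compound mapping: a 16MB head followed by three linked
	 * 16MB entries, i.e. four contiguous boot mappings. */
	const unsigned long chain[] = { 16 << 20, 16 << 20, 16 << 20, 16 << 20 };
	unsigned long span = chain[0], newsize = chain[0];
	int i, depth = 0;

	/* Accumulate the span, remembering the longest prefix whose
	 * total is itself a representable PMB page size. */
	for (i = 1; i < 4; i++) {
		span += chain[i];
		if (size_valid(span)) {
			newsize = span;
			depth = i;
		}
	}

	/* Prints "newsize=64MB depth=3": the head grows to 64MB and
	 * the three merged links are freed via __pmb_unmap_entry(). */
	printf("newsize=%luMB depth=%d\n", newsize >> 20, depth);
	return 0;
}

Four contiguous 16MB boot mappings thus collapse into one 64MB entry, handing three PMB slots back to the allocator.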