Diffstat (limited to 'arch/sh/mm')
-rw-r--r--  arch/sh/mm/Kconfig    |  10
-rw-r--r--  arch/sh/mm/init.c     |   1
-rw-r--r--  arch/sh/mm/pmb.c      | 243
-rw-r--r--  arch/sh/mm/uncached.c |   6
4 files changed, 213 insertions(+), 47 deletions(-)
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 65cb5b83e072..1445ca6257df 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -91,16 +91,6 @@ config PMB
 	  32-bits through the SH-4A PMB. If this is not set, legacy
 	  29-bit physical addressing will be used.
 
-config PMB_LEGACY
-	bool "Support legacy boot mappings for PMB"
-	depends on PMB
-	select 32BIT
-	help
-	  If this option is enabled, fixed PMB mappings are inherited
-	  from the boot loader, and the kernel does not attempt dynamic
-	  management. This is the closest to legacy 29-bit physical mode,
-	  and allows systems to support up to 512MiB of system memory.
-
 config X2TLB
 	def_bool y
 	depends on (CPU_SHX2 || CPU_SHX3) && MMU
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 08e280d7cc7e..68028e8f26ce 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -245,7 +245,6 @@ void __init mem_init(void)
 	memset(empty_zero_page, 0, PAGE_SIZE);
 	__flush_wback_region(empty_zero_page, PAGE_SIZE);
 
-	uncached_init();
 	vsyscall_init();
 
 	codesize = (unsigned long) &_etext - (unsigned long) &_text;
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index b9d5476e1284..198bcff5e96f 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -52,7 +52,7 @@ struct pmb_entry {
 	struct pmb_entry *link;
 };
 
-static void pmb_unmap_entry(struct pmb_entry *);
+static void pmb_unmap_entry(struct pmb_entry *, int depth);
 
 static DEFINE_RWLOCK(pmb_rwlock);
 static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
@@ -115,13 +115,14 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
 
 	pmbe = &pmb_entry_list[pos];
 
+	memset(pmbe, 0, sizeof(struct pmb_entry));
+
 	spin_lock_init(&pmbe->lock);
 
 	pmbe->vpn = vpn;
 	pmbe->ppn = ppn;
 	pmbe->flags = flags;
 	pmbe->entry = pos;
-	pmbe->size = 0;
 
 	return pmbe;
 
@@ -133,7 +134,9 @@ out:
 static void pmb_free(struct pmb_entry *pmbe)
 {
 	__clear_bit(pmbe->entry, pmb_map);
-	pmbe->entry = PMB_NO_ENTRY;
+
+	pmbe->entry = PMB_NO_ENTRY;
+	pmbe->link = NULL;
 }
 
 /*
@@ -161,9 +164,6 @@ static __always_inline unsigned long pmb_cache_flags(void)
  */
 static void __set_pmb_entry(struct pmb_entry *pmbe)
 {
-	pmbe->flags &= ~PMB_CACHE_MASK;
-	pmbe->flags |= pmb_cache_flags();
-
 	writel_uncached(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
 	writel_uncached(pmbe->ppn | pmbe->flags | PMB_V,
 			mk_pmb_data(pmbe->entry));
@@ -280,7 +280,7 @@ again:
 	return wanted - size;
 
 out:
-	pmb_unmap_entry(pmbp);
+	pmb_unmap_entry(pmbp, NR_PMB_ENTRIES);
 
 	return err;
 }
@@ -302,18 +302,40 @@ void pmb_unmap(unsigned long addr)
 
 	read_unlock(&pmb_rwlock);
 
-	pmb_unmap_entry(pmbe);
+	pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
 }
 
-static void pmb_unmap_entry(struct pmb_entry *pmbe)
+static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
 {
-	unsigned long flags;
+	return (b->vpn == (a->vpn + a->size)) &&
+	       (b->ppn == (a->ppn + a->size)) &&
+	       (b->flags == a->flags);
+}
 
-	if (unlikely(!pmbe))
-		return;
+static bool pmb_size_valid(unsigned long size)
+{
+	int i;
 
-	write_lock_irqsave(&pmb_rwlock, flags);
+	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+		if (pmb_sizes[i].size == size)
+			return true;
+
+	return false;
+}
+
+static int pmb_size_to_flags(unsigned long size)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+		if (pmb_sizes[i].size == size)
+			return pmb_sizes[i].flag;
 
+	return 0;
+}
+
+static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
+{
 	do {
 		struct pmb_entry *pmblink = pmbe;
 
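Note on the helpers introduced above: pmb_size_valid() and pmb_size_to_flags() are plain scans of the pmb_sizes[] table, and pmb_can_merge() is a pure adjacency check. A standalone user-space sketch of the size lookup follows; the table contents (the four SH-4A PMB page sizes) and the flag values are assumptions for the demo, not copied from the patch:

#include <stdbool.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Stand-in for the kernel's pmb_sizes[] table: the four page sizes an
 * SH-4A PMB entry can encode. The flag values are illustrative only. */
static const struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{  16UL << 20, 0x00 },
	{  64UL << 20, 0x10 },
	{ 128UL << 20, 0x80 },
	{ 512UL << 20, 0x90 },
};

/* Same shape as the patch: a span is mappable iff the hardware has an
 * exact encoding for it. */
static bool pmb_size_valid(unsigned long size)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return true;

	return false;
}

int main(void)
{
	/* 16MB + 64MB = 80MB has no encoding, so a merge would stop short;
	 * 64MB + 64MB = 128MB does, and can be coalesced. */
	printf("80MB valid: %d, 128MB valid: %d\n",
	       pmb_size_valid(80UL << 20), pmb_size_valid(128UL << 20));
	return 0;
}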
@@ -332,8 +354,18 @@ static void pmb_unmap_entry(struct pmb_entry *pmbe)
 		pmbe = pmblink->link;
 
 		pmb_free(pmblink);
-	} while (pmbe);
+	} while (pmbe && --depth);
+}
+
+static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
+{
+	unsigned long flags;
 
+	if (unlikely(!pmbe))
+		return;
+
+	write_lock_irqsave(&pmb_rwlock, flags);
+	__pmb_unmap_entry(pmbe, depth);
 	write_unlock_irqrestore(&pmb_rwlock, flags);
 }
 
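The depth argument threaded through the unmap path above is what makes partial teardown possible: `} while (pmbe && --depth)` releases at most depth entries of a compound chain, so pmb_unmap() passes NR_PMB_ENTRIES to mean 'the whole chain' while pmb_merge() can clip just the tail. A minimal sketch of that loop shape, with a simplified node type and printf standing in for pmb_free():

#include <stdio.h>

struct node {
	int id;
	struct node *link;
};

/* Mirrors __pmb_unmap_entry(): release entries along the chain, but
 * stop after at most 'depth' of them. */
static void unmap_chain(struct node *n, int depth)
{
	do {
		struct node *cur = n;

		n = cur->link;
		printf("freeing entry %d\n", cur->id); /* pmb_free() stand-in */
	} while (n && --depth);
}

int main(void)
{
	struct node c = { 2, NULL };
	struct node b = { 1, &c };
	struct node a = { 0, &b };

	unmap_chain(&a, 2); /* frees entries 0 and 1, entry 2 survives */
	return 0;
}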
@@ -342,14 +374,40 @@ static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
 	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
 }
 
-static int pmb_synchronize_mappings(void)
+static void __init pmb_notify(void)
 {
-	unsigned int applied = 0;
-	struct pmb_entry *pmbp = NULL;
-	int i, j;
+	int i;
 
 	pr_info("PMB: boot mappings:\n");
 
+	read_lock(&pmb_rwlock);
+
+	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
+		struct pmb_entry *pmbe;
+
+		if (!test_bit(i, pmb_map))
+			continue;
+
+		pmbe = &pmb_entry_list[i];
+
+		pr_info("       0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
+			pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
+			pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
+	}
+
+	read_unlock(&pmb_rwlock);
+}
+
+/*
+ * Sync our software copy of the PMB mappings with those in hardware. The
+ * mappings in the hardware PMB were either set up by the bootloader or
+ * very early on by the kernel.
+ */
+static void __init pmb_synchronize(void)
+{
+	struct pmb_entry *pmbp = NULL;
+	int i, j;
+
 	/*
 	 * Run through the initial boot mappings, log the established
 	 * ones, and blow away anything that falls outside of the valid
@@ -432,10 +490,10 @@ static int pmb_synchronize_mappings(void)
 		/*
 		 * Compare the previous entry against the current one to
 		 * see if the entries span a contiguous mapping. If so,
-		 * setup the entry links accordingly.
+		 * setup the entry links accordingly. Compound mappings
+		 * are later coalesced.
 		 */
-		if ((pmbe->vpn == (pmbp->vpn + pmbp->size)) &&
-		    (pmbe->ppn == (pmbp->ppn + pmbp->size)))
+		if (pmb_can_merge(pmbp, pmbe))
 			pmbp->link = pmbe;
 
 		spin_unlock(&pmbp->lock);
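As a worked example of the linking rule above: two boot mappings get linked only if they abut in both the virtual and physical spaces and carry identical flags. The addresses below (64MB P1 mappings at 0xa0000000/0xa4000000 over 0x40000000/0x44000000) are hypothetical values chosen for the demo:

#include <stdbool.h>
#include <stdio.h>

struct ent {
	unsigned long vpn, ppn, size, flags;
};

/* The same predicate as pmb_can_merge() in the patch. */
static bool can_merge(const struct ent *a, const struct ent *b)
{
	return (b->vpn == (a->vpn + a->size)) &&
	       (b->ppn == (a->ppn + a->size)) &&
	       (b->flags == a->flags);
}

int main(void)
{
	/* Two contiguous 64MB mappings: 0xa0000000 and 0xa4000000 backed
	 * by 0x40000000 and 0x44000000, identical flags. */
	struct ent a = { 0xa0000000UL, 0x40000000UL, 64UL << 20, 0 };
	struct ent b = { 0xa4000000UL, 0x44000000UL, 64UL << 20, 0 };

	printf("%s\n", can_merge(&a, &b) ? "link and coalesce to 128MB"
					 : "keep separate");
	return 0;
}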
@@ -444,37 +502,150 @@ static int pmb_synchronize_mappings(void)
 		pmbp = pmbe;
 
 		spin_unlock_irqrestore(&pmbe->lock, irqflags);
+	}
+}
 
-	pr_info("\t0x%08lx -> 0x%08lx [ %ldMB %scached ]\n",
-		vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, pmbe->size >> 20,
-		(data_val & PMB_C) ? "" : "un");
+static void __init pmb_merge(struct pmb_entry *head)
+{
+	unsigned long span, newsize;
+	struct pmb_entry *tail;
+	int i = 1, depth = 0;
+
+	span = newsize = head->size;
 
-		applied++;
+	tail = head->link;
+	while (tail) {
+		span += tail->size;
+
+		if (pmb_size_valid(span)) {
+			newsize = span;
+			depth = i;
+		}
+
+		/* This is the end of the line.. */
+		if (!tail->link)
+			break;
+
+		tail = tail->link;
+		i++;
 	}
 
-	return (applied == 0);
+	/*
+	 * The merged page size must be valid.
+	 */
+	if (!pmb_size_valid(newsize))
+		return;
+
+	head->flags &= ~PMB_SZ_MASK;
+	head->flags |= pmb_size_to_flags(newsize);
+
+	head->size = newsize;
+
+	__pmb_unmap_entry(head->link, depth);
+	__set_pmb_entry(head);
 }
 
-int pmb_init(void)
+static void __init pmb_coalesce(void)
 {
-	int ret;
+	unsigned long flags;
+	int i;
+
+	write_lock_irqsave(&pmb_rwlock, flags);
+
+	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
+		struct pmb_entry *pmbe;
+
+		if (!test_bit(i, pmb_map))
+			continue;
+
+		pmbe = &pmb_entry_list[i];
+
+		/*
+		 * We're only interested in compound mappings
+		 */
+		if (!pmbe->link)
+			continue;
+
+		/*
+		 * Nothing to do if it already uses the largest possible
+		 * page size.
+		 */
+		if (pmbe->size == SZ_512M)
+			continue;
+
+		pmb_merge(pmbe);
+	}
+
+	write_unlock_irqrestore(&pmb_rwlock, flags);
+}
+
+#ifdef CONFIG_UNCACHED_MAPPING
+static void __init pmb_resize(void)
+{
+	int i;
 
 	/*
-	 * Sync our software copy of the PMB mappings with those in
-	 * hardware. The mappings in the hardware PMB were either set up
-	 * by the bootloader or very early on by the kernel.
+	 * If the uncached mapping was constructed by the kernel, it will
+	 * already be a reasonable size.
 	 */
-	ret = pmb_synchronize_mappings();
-	if (unlikely(ret == 0))
-		return 0;
+	if (uncached_size == SZ_16M)
+		return;
+
+	read_lock(&pmb_rwlock);
+
+	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
+		struct pmb_entry *pmbe;
+		unsigned long flags;
+
+		if (!test_bit(i, pmb_map))
+			continue;
+
+		pmbe = &pmb_entry_list[i];
+
+		if (pmbe->vpn != uncached_start)
+			continue;
+
+		/*
+		 * Found it, now resize it.
+		 */
+		spin_lock_irqsave(&pmbe->lock, flags);
+
+		pmbe->size = SZ_16M;
+		pmbe->flags &= ~PMB_SZ_MASK;
+		pmbe->flags |= pmb_size_to_flags(pmbe->size);
+
+		uncached_resize(pmbe->size);
+
+		__set_pmb_entry(pmbe);
+
+		spin_unlock_irqrestore(&pmbe->lock, flags);
+	}
+
+	read_unlock(&pmb_rwlock);
+}
+#endif
+
+void __init pmb_init(void)
+{
+	/* Synchronize software state */
+	pmb_synchronize();
+
+	/* Attempt to combine compound mappings */
+	pmb_coalesce();
+
+#ifdef CONFIG_UNCACHED_MAPPING
+	/* Resize initial mappings, if necessary */
+	pmb_resize();
+#endif
+
+	/* Log them */
+	pmb_notify();
 
 	writel_uncached(0, PMB_IRMCR);
 
 	/* Flush out the TLB */
 	__raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);
 	ctrl_barrier();
-
-	return 0;
 }
 
 bool __in_29bit_mode(void)
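The coalescing pass added above is the core of the change: pmb_merge() accumulates the span of a linked chain and remembers the deepest prefix whose total is still an encodable PMB size, then rewrites the head entry and unmaps the clipped tail. A condensed user-space rendering of that scan, under the assumption that 16/64/128/512MB are the valid sizes; list handling, locking, and the hardware writes are omitted:

#include <stdbool.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Valid SH-4A PMB page sizes, as in the pmb_sizes[] table. */
static const unsigned long valid_sizes[] = {
	16UL << 20, 64UL << 20, 128UL << 20, 512UL << 20,
};

static bool pmb_size_valid(unsigned long size)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(valid_sizes); i++)
		if (valid_sizes[i] == size)
			return true;

	return false;
}

/* The scan at the heart of pmb_merge(): accumulate the chain's span and
 * remember the deepest prefix that still sums to an encodable size.
 * Entries past the returned depth are the ones pmb_merge() would clip
 * with __pmb_unmap_entry(). */
static unsigned long merge_span(const unsigned long *sizes, int n, int *depth)
{
	unsigned long span = sizes[0], newsize = sizes[0];
	int i;

	*depth = 0;
	for (i = 1; i < n; i++) {
		span += sizes[i];
		if (pmb_size_valid(span)) {
			newsize = span;
			*depth = i;
		}
	}

	return newsize;
}

int main(void)
{
	/* A bootloader might leave four 16MB entries and one 64MB entry:
	 * 4 x 16MB folds to 64MB, and the trailing 64MB extends that to
	 * an encodable 128MB, so the whole chain merges. */
	unsigned long chain[] = {
		16UL << 20, 16UL << 20, 16UL << 20, 16UL << 20, 64UL << 20,
	};
	int depth;
	unsigned long newsize = merge_span(chain, ARRAY_SIZE(chain), &depth);

	printf("merged to %luMB, depth %d\n", newsize >> 20, depth);
	return 0;
}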
diff --git a/arch/sh/mm/uncached.c b/arch/sh/mm/uncached.c
index 807906981d9d..cf20a5c5136a 100644
--- a/arch/sh/mm/uncached.c
+++ b/arch/sh/mm/uncached.c
@@ -26,3 +26,9 @@ void __init uncached_init(void)
 	uncached_start = memory_end;
 	uncached_end = uncached_start + uncached_size;
 }
+
+void __init uncached_resize(unsigned long size)
+{
+	uncached_size = size;
+	uncached_end = uncached_start + uncached_size;
+}
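uncached_resize() simply keeps the (uncached_start, uncached_size, uncached_end) triple consistent after pmb_resize() shrinks the backing PMB entry to SZ_16M. A trivial sketch of that contract with the globals stood in locally; the initial values are invented for the demo:

#include <assert.h>

/* Local stand-ins for the globals in arch/sh/mm/uncached.c. */
static unsigned long uncached_start = 0xb0000000UL;
static unsigned long uncached_size  = 64UL << 20;
static unsigned long uncached_end   = 0xb0000000UL + (64UL << 20);

static void uncached_resize(unsigned long size)
{
	uncached_size = size;
	uncached_end = uncached_start + uncached_size;
}

int main(void)
{
	uncached_resize(16UL << 20); /* what pmb_resize() does with SZ_16M */
	assert(uncached_end - uncached_start == uncached_size);
	return 0;
}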