author    Paul Mundt <lethal@linux-sh.org>  2009-09-01 00:54:14 -0400
committer Paul Mundt <lethal@linux-sh.org>  2009-09-01 00:54:14 -0400
commit    ac6a0cf6716bb46813d0161024c66c2af66e53d1 (patch)
tree      c7f53b1a04c590032c022549f3186fb9b04f8358 /arch/sh/mm
parent    e76a0136a3cf1859fbc07f122e42293d22229558 (diff)
parent    ce3f7cb96e67d6518c7fc7b361a76409c3817d64 (diff)
Merge branch 'master' into sh/smp

Conflicts:
	arch/sh/mm/cache-sh4.c
Diffstat (limited to 'arch/sh/mm')
-rw-r--r--  arch/sh/mm/Kconfig       7
-rw-r--r--  arch/sh/mm/cache-sh4.c  74
-rw-r--r--  arch/sh/mm/ioremap_32.c  8
-rw-r--r--  arch/sh/mm/tlb-sh4.c     9
4 files changed, 71 insertions(+), 27 deletions(-)
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 2795618e4f07..64dc1ad59801 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -82,7 +82,7 @@ config 32BIT
 
 config PMB_ENABLE
 	bool "Support 32-bit physical addressing through PMB"
-	depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785)
+	depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7757 || CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785)
 	select 32BIT
 	default y
 	help
@@ -97,7 +97,7 @@ choice
 
 config PMB
 	bool "PMB"
-	depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785)
+	depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7757 || CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785)
 	select 32BIT
 	help
 	  If you say Y here, physical addressing will be extended to
@@ -106,7 +106,8 @@ config PMB
 
 config PMB_FIXED
 	bool "fixed PMB"
-	depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7780 || \
+	depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7757 || \
+			       CPU_SUBTYPE_SH7780 || \
 			       CPU_SUBTYPE_SH7785)
 	select 32BIT
 	help
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 2775f84d9aa3..70fb906419dd 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -455,7 +455,49 @@ static void __flush_cache_4096(unsigned long addr, unsigned long phys,
  * Break the 1, 2 and 4 way variants of this out into separate functions to
  * avoid nearly all the overhead of having the conditional stuff in the function
  * bodies (+ the 1 and 2 way cases avoid saving any registers too).
+ *
+ * We want to eliminate unnecessary bus transactions, so this code uses
+ * a non-obvious technique.
+ *
+ * Loop over a cache-way-sized block of memory, one cache line at a time.
+ * For each line, use movca.l to cause the current cache line contents to
+ * be written back, but without reading anything from main memory. However
+ * this has the side effect that the cache is now caching that memory
+ * location. So follow this with a cache invalidate to mark the cache line
+ * invalid. And do all this with interrupts disabled, to avoid the cache
+ * line being accidentally evicted while it is holding garbage.
+ *
+ * This also breaks in a number of circumstances:
+ * - If there are modifications to the region of memory just above
+ *   empty_zero_page (for example because a breakpoint has been placed
+ *   there), then these can be lost.
+ *
+ *   This is because the memory address which the cache temporarily
+ *   caches in the above description is empty_zero_page. So the
+ *   movca.l hits the cache (it is assumed that it misses, or at least
+ *   isn't dirty), modifies the line and then invalidates it, losing the
+ *   required change.
+ *
+ * - If caches are disabled or configured in write-through mode, then
+ *   the movca.l writes garbage directly into memory.
  */
+static void __flush_dcache_segment_writethrough(unsigned long start,
+						unsigned long extent_per_way)
+{
+	unsigned long addr;
+	int i;
+
+	addr = CACHE_OC_ADDRESS_ARRAY | (start & cpu_data->dcache.entry_mask);
+
+	while (extent_per_way) {
+		for (i = 0; i < cpu_data->dcache.ways; i++)
+			__raw_writel(0, addr + cpu_data->dcache.way_incr * i);
+
+		addr += cpu_data->dcache.linesz;
+		extent_per_way -= cpu_data->dcache.linesz;
+	}
+}
+
 static void __flush_dcache_segment_1way(unsigned long start,
 					unsigned long extent_per_way)
 {
@@ -655,24 +697,30 @@ extern void __weak sh4__flush_region_init(void);
  */
 void __init sh4_cache_init(void)
 {
+	unsigned int wt_enabled = !!(__raw_readl(CCR) & CCR_CACHE_WT);
+
 	printk("PVR=%08x CVR=%08x PRR=%08x\n",
 		ctrl_inl(CCN_PVR),
 		ctrl_inl(CCN_CVR),
 		ctrl_inl(CCN_PRR));
 
-	switch (boot_cpu_data.dcache.ways) {
-	case 1:
-		__flush_dcache_segment_fn = __flush_dcache_segment_1way;
-		break;
-	case 2:
-		__flush_dcache_segment_fn = __flush_dcache_segment_2way;
-		break;
-	case 4:
-		__flush_dcache_segment_fn = __flush_dcache_segment_4way;
-		break;
-	default:
-		panic("unknown number of cache ways\n");
-		break;
+	if (wt_enabled)
+		__flush_dcache_segment_fn = __flush_dcache_segment_writethrough;
+	else {
+		switch (boot_cpu_data.dcache.ways) {
+		case 1:
+			__flush_dcache_segment_fn = __flush_dcache_segment_1way;
+			break;
+		case 2:
+			__flush_dcache_segment_fn = __flush_dcache_segment_2way;
+			break;
+		case 4:
+			__flush_dcache_segment_fn = __flush_dcache_segment_4way;
+			break;
+		default:
+			panic("unknown number of cache ways\n");
+			break;
+		}
 	}
 
 	local_flush_icache_range = sh4_flush_icache_range;
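With the write-through case folded into boot-time selection, callers go
through a single function pointer regardless of which variant was installed.
A usage sketch, assuming boot_cpu_data.dcache.way_size holds one way's extent
in bytes (that field is not shown in this diff):

static void flush_dcache_all_sketch(void)
{
	/* Flush a full way through whichever variant was installed. */
	__flush_dcache_segment_fn(0UL, boot_cpu_data.dcache.way_size);
	wmb();	/* ensure writebacks drain before continuing */
}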
diff --git a/arch/sh/mm/ioremap_32.c b/arch/sh/mm/ioremap_32.c
index da2f4186f2cd..c3250614e3ae 100644
--- a/arch/sh/mm/ioremap_32.c
+++ b/arch/sh/mm/ioremap_32.c
@@ -57,14 +57,6 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 	if (is_pci_memory_fixed_range(phys_addr, size))
 		return (void __iomem *)phys_addr;
 
-#if !defined(CONFIG_PMB_FIXED)
-	/*
-	 * Don't allow anybody to remap normal RAM that we're using..
-	 */
-	if (phys_addr < virt_to_phys(high_memory))
-		return NULL;
-#endif
-
 	/*
 	 * Mappings have to be page-aligned
 	 */
diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c
index 7d3c63e707a5..8cf550e2570f 100644
--- a/arch/sh/mm/tlb-sh4.c
+++ b/arch/sh/mm/tlb-sh4.c
@@ -43,9 +43,12 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 	 */
 	ctrl_outl(pte.pte_high, MMU_PTEA);
 #else
-	if (cpu_data->flags & CPU_HAS_PTEA)
-		/* TODO: make this look less hacky */
-		ctrl_outl(((pteval >> 28) & 0xe) | (pteval & 0x1), MMU_PTEA);
+	if (cpu_data->flags & CPU_HAS_PTEA) {
+		/* The last 3 bits and the first one of pteval contain
+		 * the PTEA timing control and space attribute bits
+		 */
+		ctrl_outl(copy_ptea_attributes(pteval), MMU_PTEA);
+	}
 #endif
 
 	/* Set PTEL register */
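copy_ptea_attributes() replaces the open-coded shift/mask, but its body is not
part of this diff. Here is a sketch reconstructed from the expression it
replaces; the real definition lives elsewhere in the SH tree and may differ:

static inline unsigned long copy_ptea_attributes(unsigned long pteval)
{
	/* Move the timing-control bits from the top of pteval down to
	 * PTEA bits 3:1, and carry the space attribute bit across in
	 * bit 0, exactly as the removed expression did. */
	return ((pteval >> 28) & 0xe) | (pteval & 0x1);
}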