Diffstat (limited to 'arch/sh/mm')
-rw-r--r--  arch/sh/mm/Kconfig        |  2
-rw-r--r--  arch/sh/mm/cache-sh4.c    | 12
-rw-r--r--  arch/sh/mm/cache-sh7705.c |  9
-rw-r--r--  arch/sh/mm/pg-sh4.c       | 22
-rw-r--r--  arch/sh/mm/pg-sh7705.c    | 31
-rw-r--r--  arch/sh/mm/tlb-flush.c    | 55
-rw-r--r--  arch/sh/mm/tlb-sh3.c      | 63
-rw-r--r--  arch/sh/mm/tlb-sh4.c      | 68
8 files changed, 76 insertions(+), 186 deletions(-)
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index fddf6680ec4f..28b5102e1cdc 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -20,7 +20,7 @@ config CPU_SH4
 	bool
 	select CPU_HAS_INTEVT
 	select CPU_HAS_SR_RB
-	select CPU_HAS_PTEA if !CPU_SUBTYPE_ST40
+	select CPU_HAS_PTEA if (!CPU_SUBTYPE_ST40 && !CPU_SH4A) || CPU_SHX2
 
 config CPU_SH4A
 	bool
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index c6955157c989..72bb48773337 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -236,10 +236,20 @@ static inline void flush_cache_4096(unsigned long start,
 /*
  * Write back & invalidate the D-cache of the page.
  * (To avoid "alias" issues)
+ *
+ * This uses a lazy write-back on UP, which is explicitly
+ * disabled on SMP.
  */
 void flush_dcache_page(struct page *page)
 {
-	if (test_bit(PG_mapped, &page->flags)) {
+#ifndef CONFIG_SMP
+	struct address_space *mapping = page_mapping(page);
+
+	if (mapping && !mapping_mapped(mapping))
+		set_bit(PG_dcache_dirty, &page->flags);
+	else
+#endif
+	{
 		unsigned long phys = PHYSADDR(page_address(page));
 		unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
 		int i, n;
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index 045abdf078f5..2808b580d984 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -3,11 +3,11 @@
  *
  * Copyright (C) 1999, 2000 Niibe Yutaka
  * Copyright (C) 2004 Alex Song
+ * Copyright (C) 2006 Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
- *
  */
 #include <linux/init.h>
 #include <linux/mman.h>
@@ -51,7 +51,6 @@ static inline void cache_wback_all(void)
 
 			if ((data & v) == v)
 				ctrl_outl(data & ~v, addr);
-
 		}
 
 		addrstart += cpu_data->dcache.way_incr;
@@ -128,7 +127,11 @@ static void __flush_dcache_page(unsigned long phys)
  */
 void flush_dcache_page(struct page *page)
 {
-	if (test_bit(PG_mapped, &page->flags))
+	struct address_space *mapping = page_mapping(page);
+
+	if (mapping && !mapping_mapped(mapping))
+		set_bit(PG_dcache_dirty, &page->flags);
+	else
 		__flush_dcache_page(PHYSADDR(page_address(page)));
 }
 
diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c
index 3f98d2a4f936..cfc323551741 100644
--- a/arch/sh/mm/pg-sh4.c
+++ b/arch/sh/mm/pg-sh4.c
@@ -23,7 +23,6 @@ extern struct mutex p3map_mutex[];
  */
 void clear_user_page(void *to, unsigned long address, struct page *page)
 {
-	__set_bit(PG_mapped, &page->flags);
 	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
 		clear_page(to);
 	else {
@@ -59,7 +58,6 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
 void copy_user_page(void *to, void *from, unsigned long address,
 		    struct page *page)
 {
-	__set_bit(PG_mapped, &page->flags);
 	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
 		copy_page(to, from);
 	else {
@@ -84,23 +82,3 @@ void copy_user_page(void *to, void *from, unsigned long address,
 		mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
 	}
 }
-
-/*
- * For SH-4, we have our own implementation for ptep_get_and_clear
- */
-inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-	pte_t pte = *ptep;
-
-	pte_clear(mm, addr, ptep);
-	if (!pte_not_present(pte)) {
-		unsigned long pfn = pte_pfn(pte);
-		if (pfn_valid(pfn)) {
-			struct page *page = pfn_to_page(pfn);
-			struct address_space *mapping = page_mapping(page);
-			if (!mapping || !mapping_writably_mapped(mapping))
-				__clear_bit(PG_mapped, &page->flags);
-		}
-	}
-	return pte;
-}
diff --git a/arch/sh/mm/pg-sh7705.c b/arch/sh/mm/pg-sh7705.c
index ff9ece986cbc..b052d0fee827 100644
--- a/arch/sh/mm/pg-sh7705.c
+++ b/arch/sh/mm/pg-sh7705.c
@@ -7,9 +7,7 @@
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
- *
  */
-
 #include <linux/init.h>
 #include <linux/mman.h>
 #include <linux/mm.h>
@@ -76,7 +74,6 @@ void clear_user_page(void *to, unsigned long address, struct page *pg)
 {
 	struct page *page = virt_to_page(to);
 
-	__set_bit(PG_mapped, &page->flags);
 	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) {
 		clear_page(to);
 		__flush_wback_region(to, PAGE_SIZE);
@@ -95,12 +92,11 @@ void clear_user_page(void *to, unsigned long address, struct page *pg)
  * @from: P1 address
  * @address: U0 address to be mapped
  */
-void copy_user_page(void *to, void *from, unsigned long address, struct page *pg)
+void copy_user_page(void *to, void *from, unsigned long address,
+		    struct page *pg)
 {
 	struct page *page = virt_to_page(to);
 
-
-	__set_bit(PG_mapped, &page->flags);
 	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) {
 		copy_page(to, from);
 		__flush_wback_region(to, PAGE_SIZE);
@@ -112,26 +108,3 @@ void copy_user_page(void *to, void *from, unsigned long address, struct page *pg
 		__flush_wback_region(to, PAGE_SIZE);
 	}
 }
-
-/*
- * For SH7705, we have our own implementation for ptep_get_and_clear
- * Copied from pg-sh4.c
- */
-inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-	pte_t pte = *ptep;
-
-	pte_clear(mm, addr, ptep);
-	if (!pte_not_present(pte)) {
-		unsigned long pfn = pte_pfn(pte);
-		if (pfn_valid(pfn)) {
-			struct page *page = pfn_to_page(pfn);
-			struct address_space *mapping = page_mapping(page);
-			if (!mapping || !mapping_writably_mapped(mapping))
-				__clear_bit(PG_mapped, &page->flags);
-		}
-	}
-
-	return pte;
-}
-
diff --git a/arch/sh/mm/tlb-flush.c b/arch/sh/mm/tlb-flush.c
index 73ec7f6084fa..9347534aa894 100644
--- a/arch/sh/mm/tlb-flush.c
+++ b/arch/sh/mm/tlb-flush.c
@@ -2,15 +2,17 @@
  * TLB flushing operations for SH with an MMU.
  *
  * Copyright (C) 1999 Niibe Yutaka
- * Copyright (C) 2003 Paul Mundt
+ * Copyright (C) 2003 - 2006 Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  */
 #include <linux/mm.h>
+#include <linux/io.h>
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
@@ -132,3 +134,54 @@ void flush_tlb_all(void)
 	ctrl_barrier();
 	local_irq_restore(flags);
 }
+
+void update_mmu_cache(struct vm_area_struct *vma,
+		      unsigned long address, pte_t pte)
+{
+	unsigned long flags;
+	unsigned long pteval;
+	unsigned long vpn;
+	struct page *page;
+	unsigned long pfn = pte_pfn(pte);
+	struct address_space *mapping;
+
+	if (!pfn_valid(pfn))
+		return;
+
+	page = pfn_to_page(pfn);
+	mapping = page_mapping(page);
+	if (mapping) {
+		unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
+		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
+
+		if (dirty)
+			__flush_wback_region((void *)P1SEGADDR(phys),
+					     PAGE_SIZE);
+	}
+
+	local_irq_save(flags);
+
+	/* Set PTEH register */
+	vpn = (address & MMU_VPN_MASK) | get_asid();
+	ctrl_outl(vpn, MMU_PTEH);
+
+	pteval = pte_val(pte);
+
+#ifdef CONFIG_CPU_HAS_PTEA
+	/* Set PTEA register */
+	/* TODO: make this look less hacky */
+	ctrl_outl(((pteval >> 28) & 0xe) | (pteval & 0x1), MMU_PTEA);
+#endif
+
+	/* Set PTEL register */
+	pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
+#ifdef CONFIG_SH_WRITETHROUGH
+	pteval |= _PAGE_WT;
+#endif
+	/* conveniently, we want all the software flags to be 0 anyway */
+	ctrl_outl(pteval, MMU_PTEL);
+
+	/* Load the TLB */
+	asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
+	local_irq_restore(flags);
+}
diff --git a/arch/sh/mm/tlb-sh3.c b/arch/sh/mm/tlb-sh3.c
index 46b09e26e082..16627069c536 100644
--- a/arch/sh/mm/tlb-sh3.c
+++ b/arch/sh/mm/tlb-sh3.c
@@ -8,69 +8,9 @@
  *
  * Released under the terms of the GNU GPL v2.0.
  */
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/interrupt.h>
-
+#include <linux/io.h>
 #include <asm/system.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
-#include <asm/cacheflush.h>
-
-void update_mmu_cache(struct vm_area_struct * vma,
-		      unsigned long address, pte_t pte)
-{
-	unsigned long flags;
-	unsigned long pteval;
-	unsigned long vpn;
-
-	/* Ptrace may call this routine. */
-	if (vma && current->active_mm != vma->vm_mm)
-		return;
-
-#if defined(CONFIG_SH7705_CACHE_32KB)
-	{
-		struct page *page = pte_page(pte);
-		unsigned long pfn = pte_pfn(pte);
-
-		if (pfn_valid(pfn) && !test_bit(PG_mapped, &page->flags)) {
-			unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
-
-			__flush_wback_region((void *)P1SEGADDR(phys),
-					     PAGE_SIZE);
-			__set_bit(PG_mapped, &page->flags);
-		}
-	}
-#endif
-
-	local_irq_save(flags);
-
-	/* Set PTEH register */
-	vpn = (address & MMU_VPN_MASK) | get_asid();
-	ctrl_outl(vpn, MMU_PTEH);
-
-	pteval = pte_val(pte);
-
-	/* Set PTEL register */
-	pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
-	/* conveniently, we want all the software flags to be 0 anyway */
-	ctrl_outl(pteval, MMU_PTEL);
-
-	/* Load the TLB */
-	asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
-	local_irq_restore(flags);
-}
-
 
 void __flush_tlb_page(unsigned long asid, unsigned long page)
 {
@@ -94,4 +34,3 @@ void __flush_tlb_page(unsigned long asid, unsigned long page)
 	for (i = 0; i < ways; i++)
 		ctrl_outl(data, addr + (i << 8));
 }
-
diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c
index 812b2d567de2..758d8dec622b 100644
--- a/arch/sh/mm/tlb-sh4.c
+++ b/arch/sh/mm/tlb-sh4.c
@@ -8,74 +8,9 @@
  *
  * Released under the terms of the GNU GPL v2.0.
  */
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/interrupt.h>
-
+#include <linux/io.h>
 #include <asm/system.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
-#include <asm/cacheflush.h>
-
-void update_mmu_cache(struct vm_area_struct * vma,
-		      unsigned long address, pte_t pte)
-{
-	unsigned long flags;
-	unsigned long pteval;
-	unsigned long vpn;
-	struct page *page;
-	unsigned long pfn;
-
-	/* Ptrace may call this routine. */
-	if (vma && current->active_mm != vma->vm_mm)
-		return;
-
-	pfn = pte_pfn(pte);
-	if (pfn_valid(pfn)) {
-		page = pfn_to_page(pfn);
-		if (!test_bit(PG_mapped, &page->flags)) {
-			unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
-			__flush_wback_region((void *)P1SEGADDR(phys), PAGE_SIZE);
-			__set_bit(PG_mapped, &page->flags);
-		}
-	}
-
-	local_irq_save(flags);
-
-	/* Set PTEH register */
-	vpn = (address & MMU_VPN_MASK) | get_asid();
-	ctrl_outl(vpn, MMU_PTEH);
-
-	pteval = pte_val(pte);
-
-	/* Set PTEA register */
-	if (cpu_data->flags & CPU_HAS_PTEA)
-		/* TODO: make this look less hacky */
-		ctrl_outl(((pteval >> 28) & 0xe) | (pteval & 0x1), MMU_PTEA);
-
-	/* Set PTEL register */
-	pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
-#ifdef CONFIG_SH_WRITETHROUGH
-	pteval |= _PAGE_WT;
-#endif
-	/* conveniently, we want all the software flags to be 0 anyway */
-	ctrl_outl(pteval, MMU_PTEL);
-
-	/* Load the TLB */
-	asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
-	local_irq_restore(flags);
-}
-
 
 void __flush_tlb_page(unsigned long asid, unsigned long page)
 {
@@ -93,4 +28,3 @@ void __flush_tlb_page(unsigned long asid, unsigned long page)
 	ctrl_outl(data, addr);
 	back_to_P1();
 }
-
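
In outline, the diff above drops the per-page PG_mapped test-and-flush (and the custom ptep_get_and_clear() implementations that maintained it) in favour of a deferred scheme keyed on PG_dcache_dirty, with the writeback paid in the consolidated update_mmu_cache(). The sketch below is a condensed paraphrase of that pairing for readability only; it elides the UP/SMP guard and the PTEH/PTEL/PTEA programming shown in the hunks and is not the literal kernel source.

/*
 * Condensed sketch of the lazy D-cache writeback introduced above
 * (paraphrased from the hunks; not the exact in-tree code).
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	/* Page-cache page with no user mappings: just mark it dirty. */
	if (mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else
		__flush_dcache_page(PHYSADDR(page_address(page)));
}

void update_mmu_cache(struct vm_area_struct *vma,
		      unsigned long address, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	if (pfn_valid(pfn)) {
		struct page *page = pfn_to_page(pfn);

		/* Write back the deferred dirty data before the page
		 * becomes reachable through the new TLB entry. */
		if (page_mapping(page) &&
		    test_and_clear_bit(PG_dcache_dirty, &page->flags))
			__flush_wback_region(
				(void *)P1SEGADDR(pte_val(pte) & PTE_PHYS_MASK),
				PAGE_SIZE);
	}

	/* ... then program PTEH/PTEL (and PTEA where present) and ldtlb,
	 * as in the tlb-flush.c hunk above. */
}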