Diffstat (limited to 'arch/sh/mm')
-rw-r--r--  arch/sh/mm/Makefile        |  25
-rw-r--r--  arch/sh/mm/cache-sh2.c     |  50
-rw-r--r--  arch/sh/mm/cache-sh3.c     | 100
-rw-r--r--  arch/sh/mm/cache-sh4.c     | 362
-rw-r--r--  arch/sh/mm/cache-sh7705.c  | 206
-rw-r--r--  arch/sh/mm/clear_page.S    | 295
-rw-r--r--  arch/sh/mm/consistent.c    |  85
-rw-r--r--  arch/sh/mm/copy_page.S     | 397
-rw-r--r--  arch/sh/mm/extable.c       |  22
-rw-r--r--  arch/sh/mm/fault-nommu.c   |  82
-rw-r--r--  arch/sh/mm/fault.c         | 374
-rw-r--r--  arch/sh/mm/hugetlbpage.c   | 264
-rw-r--r--  arch/sh/mm/init.c          | 313
-rw-r--r--  arch/sh/mm/ioremap.c       | 163
-rw-r--r--  arch/sh/mm/pg-dma.c        |  97
-rw-r--r--  arch/sh/mm/pg-nommu.c      |  36
-rw-r--r--  arch/sh/mm/pg-sh4.c        | 122
-rw-r--r--  arch/sh/mm/pg-sh7705.c     | 137
-rw-r--r--  arch/sh/mm/tlb-nommu.c     |  58
-rw-r--r--  arch/sh/mm/tlb-sh3.c       |  92
-rw-r--r--  arch/sh/mm/tlb-sh4.c       |  96
21 files changed, 3376 insertions(+), 0 deletions(-)
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
new file mode 100644
index 000000000000..9489a1424644
--- /dev/null
+++ b/arch/sh/mm/Makefile
@@ -0,0 +1,25 @@
1 | # | ||
2 | # Makefile for the Linux SuperH-specific parts of the memory manager. | ||
3 | # | ||
4 | |||
5 | obj-y := init.o extable.o consistent.o | ||
6 | |||
7 | obj-$(CONFIG_CPU_SH2) += cache-sh2.o | ||
8 | obj-$(CONFIG_CPU_SH3) += cache-sh3.o | ||
9 | obj-$(CONFIG_CPU_SH4) += cache-sh4.o pg-sh4.o | ||
10 | |||
11 | obj-$(CONFIG_DMA_PAGE_OPS) += pg-dma.o | ||
12 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | ||
13 | |||
14 | mmu-y := fault-nommu.o tlb-nommu.o pg-nommu.o | ||
15 | mmu-$(CONFIG_MMU) := fault.o clear_page.o copy_page.o | ||
16 | |||
17 | obj-y += $(mmu-y) | ||
18 | |||
19 | ifdef CONFIG_MMU | ||
20 | obj-$(CONFIG_CPU_SH3) += tlb-sh3.o | ||
21 | obj-$(CONFIG_CPU_SH4) += tlb-sh4.o ioremap.o | ||
22 | obj-$(CONFIG_SH7705_CACHE_32KB) += pg-sh7705.o | ||
23 | endif | ||
24 | |||
25 | obj-$(CONFIG_SH7705_CACHE_32KB) += cache-sh7705.o | ||
diff --git a/arch/sh/mm/cache-sh2.c b/arch/sh/mm/cache-sh2.c
new file mode 100644
index 000000000000..2689cb24ea2b
--- /dev/null
+++ b/arch/sh/mm/cache-sh2.c
@@ -0,0 +1,50 @@
1 | /* | ||
2 | * arch/sh/mm/cache-sh2.c | ||
3 | * | ||
4 | * Copyright (C) 2002 Paul Mundt | ||
5 | * | ||
6 | * Released under the terms of the GNU GPL v2.0. | ||
7 | */ | ||
8 | #include <linux/init.h> | ||
9 | #include <linux/mm.h> | ||
10 | |||
11 | #include <asm/cache.h> | ||
12 | #include <asm/addrspace.h> | ||
13 | #include <asm/processor.h> | ||
14 | #include <asm/cacheflush.h> | ||
15 | #include <asm/io.h> | ||
16 | |||
17 | /* | ||
18 | * Calculate the OC address and set the way bit on the SH-2. | ||
19 | * | ||
20 | * We must have already jump_to_P2()'ed prior to calling this | ||
21 | * function, since we rely on CCR manipulation to do the | ||
22 | * Right Thing(tm). | ||
23 | */ | ||
24 | unsigned long __get_oc_addr(unsigned long set, unsigned long way) | ||
25 | { | ||
26 | unsigned long ccr; | ||
27 | |||
28 | /* | ||
29 | * On SH-2 the way bit isn't tracked in the address field | ||
30 | * if we're doing address array access .. instead, we need | ||
31 | * to manually switch out the way in the CCR. | ||
32 | */ | ||
33 | ccr = ctrl_inl(CCR); | ||
34 | ccr &= ~0x00c0; | ||
35 | ccr |= way << cpu_data->dcache.way_shift; | ||
36 | |||
37 | /* | ||
38 | * Despite the number of sets being halved, we end up losing | ||
39 | * the first 2 ways to OCRAM instead of the last 2 (if we're | ||
40 | * 4-way). As a result, forcibly setting the W1 bit handily | ||
41 | * bumps us up 2 ways. | ||
42 | */ | ||
43 | if (ccr & CCR_CACHE_ORA) | ||
44 | ccr |= 1 << (cpu_data->dcache.way_shift + 1); | ||
45 | |||
46 | ctrl_outl(ccr, CCR); | ||
47 | |||
48 | return CACHE_OC_ADDRESS_ARRAY | (set << cpu_data->dcache.entry_shift); | ||
49 | } | ||
50 | |||
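As a rough illustration of what __get_oc_addr() computes, the sketch below composes the same address-array entry address in plain C. The geometry constants (the 0xf0000000 base, 16-byte lines) are invented for the example; the real values come from CACHE_OC_ADDRESS_ARRAY and cpu_data->dcache above.

	/* Sketch only: example geometry, not the real SH-2 constants. */
	#define EX_OC_ARRAY	0xf0000000UL	/* assumed address-array base */
	#define EX_ENTRY_SHIFT	4		/* assumed 16-byte cache lines */

	static unsigned long ex_oc_addr(unsigned long set)
	{
		/*
		 * Note that no way index appears here: on SH-2 the way is
		 * selected through CCR by the code above, not through
		 * address bits.
		 */
		return EX_OC_ARRAY | (set << EX_ENTRY_SHIFT);
	}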
diff --git a/arch/sh/mm/cache-sh3.c b/arch/sh/mm/cache-sh3.c
new file mode 100644
index 000000000000..838731fc608d
--- /dev/null
+++ b/arch/sh/mm/cache-sh3.c
@@ -0,0 +1,100 @@
1 | /* | ||
2 | * arch/sh/mm/cache-sh3.c | ||
3 | * | ||
4 | * Copyright (C) 1999, 2000 Niibe Yutaka | ||
5 | * Copyright (C) 2002 Paul Mundt | ||
6 | * | ||
7 | * Released under the terms of the GNU GPL v2.0. | ||
8 | */ | ||
9 | |||
10 | #include <linux/init.h> | ||
11 | #include <linux/mman.h> | ||
12 | #include <linux/mm.h> | ||
13 | #include <linux/threads.h> | ||
14 | #include <asm/addrspace.h> | ||
15 | #include <asm/page.h> | ||
16 | #include <asm/pgtable.h> | ||
17 | #include <asm/processor.h> | ||
18 | #include <asm/cache.h> | ||
19 | #include <asm/io.h> | ||
20 | #include <asm/uaccess.h> | ||
21 | #include <asm/pgalloc.h> | ||
22 | #include <asm/mmu_context.h> | ||
23 | #include <asm/cacheflush.h> | ||
24 | |||
25 | /* | ||
26 | * Write back the dirty D-caches, but do not invalidate them. | ||
27 | * | ||
28 | * Is this really worth it, or should we just alias this routine | ||
29 | * to __flush_purge_region too? | ||
30 | * | ||
31 | * START: Virtual Address (U0, P1, or P3) | ||
32 | * SIZE: Size of the region. | ||
33 | */ | ||
34 | |||
35 | void __flush_wback_region(void *start, int size) | ||
36 | { | ||
37 | unsigned long v, j; | ||
38 | unsigned long begin, end; | ||
39 | unsigned long flags; | ||
40 | |||
41 | begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); | ||
42 | end = ((unsigned long)start + size + L1_CACHE_BYTES-1) | ||
43 | & ~(L1_CACHE_BYTES-1); | ||
44 | |||
45 | for (v = begin; v < end; v+=L1_CACHE_BYTES) { | ||
46 | unsigned long addrstart = CACHE_OC_ADDRESS_ARRAY; | ||
47 | for (j = 0; j < cpu_data->dcache.ways; j++) { | ||
48 | unsigned long data, addr, p; | ||
49 | |||
50 | p = __pa(v); | ||
51 | addr = addrstart | (v & cpu_data->dcache.entry_mask); | ||
52 | local_irq_save(flags); | ||
53 | data = ctrl_inl(addr); | ||
54 | |||
55 | if ((data & CACHE_PHYSADDR_MASK) == | ||
56 | (p & CACHE_PHYSADDR_MASK)) { | ||
57 | data &= ~SH_CACHE_UPDATED; | ||
58 | ctrl_outl(data, addr); | ||
59 | local_irq_restore(flags); | ||
60 | break; | ||
61 | } | ||
62 | local_irq_restore(flags); | ||
63 | addrstart += cpu_data->dcache.way_incr; | ||
64 | } | ||
65 | } | ||
66 | } | ||
67 | |||
68 | /* | ||
69 | * Write back the dirty D-caches and invalidate them. | ||
70 | * | ||
71 | * START: Virtual Address (U0, P1, or P3) | ||
72 | * SIZE: Size of the region. | ||
73 | */ | ||
74 | void __flush_purge_region(void *start, int size) | ||
75 | { | ||
76 | unsigned long v; | ||
77 | unsigned long begin, end; | ||
78 | |||
79 | begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); | ||
80 | end = ((unsigned long)start + size + L1_CACHE_BYTES-1) | ||
81 | & ~(L1_CACHE_BYTES-1); | ||
82 | |||
83 | for (v = begin; v < end; v+=L1_CACHE_BYTES) { | ||
84 | unsigned long data, addr; | ||
85 | |||
86 | data = (v & 0xfffffc00); /* _Virtual_ address, ~U, ~V */ | ||
87 | addr = CACHE_OC_ADDRESS_ARRAY | | ||
88 | (v & cpu_data->dcache.entry_mask) | SH_CACHE_ASSOC; | ||
89 | ctrl_outl(data, addr); | ||
90 | } | ||
91 | } | ||
92 | |||
93 | /* | ||
94 | * No write back please | ||
95 | * | ||
96 | * Except I don't think there's any way to avoid the writeback. So we | ||
97 | * just alias it to __flush_purge_region(). dwmw2. | ||
98 | */ | ||
99 | void __flush_invalidate_region(void *start, int size) | ||
100 | __attribute__((alias("__flush_purge_region"))); | ||
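The begin/end arithmetic in both routines above rounds an arbitrary region out to whole cache lines before walking it. A self-contained worked example, assuming a 32-byte line size (the actual value is L1_CACHE_BYTES):

	#include <assert.h>

	#define LINE 32UL	/* assumed cache line size for the example */

	int main(void)
	{
		unsigned long start = 0x1004, size = 40;
		unsigned long begin = start & ~(LINE - 1);
		unsigned long end = (start + size + LINE - 1) & ~(LINE - 1);

		assert(begin == 0x1000);	/* rounded down to a line boundary */
		assert(end == 0x1040);		/* 0x102c rounded up to the next line */
		return 0;
	}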
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
new file mode 100644
index 000000000000..ab833adf28c3
--- /dev/null
+++ b/arch/sh/mm/cache-sh4.c
@@ -0,0 +1,362 @@
1 | /* | ||
2 | * arch/sh/mm/cache-sh4.c | ||
3 | * | ||
4 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka | ||
5 | * Copyright (C) 2001, 2002, 2003, 2004 Paul Mundt | ||
6 | * Copyright (C) 2003 Richard Curnow | ||
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | ||
12 | |||
13 | #include <linux/config.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/mman.h> | ||
16 | #include <linux/mm.h> | ||
17 | #include <linux/threads.h> | ||
18 | #include <asm/addrspace.h> | ||
19 | #include <asm/page.h> | ||
20 | #include <asm/pgtable.h> | ||
21 | #include <asm/processor.h> | ||
22 | #include <asm/cache.h> | ||
23 | #include <asm/io.h> | ||
24 | #include <asm/uaccess.h> | ||
25 | #include <asm/pgalloc.h> | ||
26 | #include <asm/mmu_context.h> | ||
27 | #include <asm/cacheflush.h> | ||
28 | |||
29 | extern void __flush_cache_4096_all(unsigned long start); | ||
30 | static void __flush_cache_4096_all_ex(unsigned long start); | ||
31 | extern void __flush_dcache_all(void); | ||
32 | static void __flush_dcache_all_ex(void); | ||
33 | |||
34 | /* | ||
36 | * SH-4 has a virtually indexed, physically tagged cache. | ||
36 | */ | ||
37 | |||
38 | struct semaphore p3map_sem[4]; | ||
39 | |||
40 | void __init p3_cache_init(void) | ||
41 | { | ||
42 | if (remap_area_pages(P3SEG, 0, PAGE_SIZE*4, _PAGE_CACHABLE)) | ||
43 | panic("%s failed.", __FUNCTION__); | ||
44 | |||
45 | sema_init (&p3map_sem[0], 1); | ||
46 | sema_init (&p3map_sem[1], 1); | ||
47 | sema_init (&p3map_sem[2], 1); | ||
48 | sema_init (&p3map_sem[3], 1); | ||
49 | } | ||
50 | |||
51 | /* | ||
52 | * Write back the dirty D-caches, but do not invalidate them. | ||
53 | * | ||
54 | * START: Virtual Address (U0, P1, or P3) | ||
55 | * SIZE: Size of the region. | ||
56 | */ | ||
57 | void __flush_wback_region(void *start, int size) | ||
58 | { | ||
59 | unsigned long v; | ||
60 | unsigned long begin, end; | ||
61 | |||
62 | begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); | ||
63 | end = ((unsigned long)start + size + L1_CACHE_BYTES-1) | ||
64 | & ~(L1_CACHE_BYTES-1); | ||
65 | for (v = begin; v < end; v+=L1_CACHE_BYTES) { | ||
66 | asm volatile("ocbwb %0" | ||
67 | : /* no output */ | ||
68 | : "m" (__m(v))); | ||
69 | } | ||
70 | } | ||
71 | |||
72 | /* | ||
73 | * Write back the dirty D-caches and invalidate them. | ||
74 | * | ||
75 | * START: Virtual Address (U0, P1, or P3) | ||
76 | * SIZE: Size of the region. | ||
77 | */ | ||
78 | void __flush_purge_region(void *start, int size) | ||
79 | { | ||
80 | unsigned long v; | ||
81 | unsigned long begin, end; | ||
82 | |||
83 | begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); | ||
84 | end = ((unsigned long)start + size + L1_CACHE_BYTES-1) | ||
85 | & ~(L1_CACHE_BYTES-1); | ||
86 | for (v = begin; v < end; v+=L1_CACHE_BYTES) { | ||
87 | asm volatile("ocbp %0" | ||
88 | : /* no output */ | ||
89 | : "m" (__m(v))); | ||
90 | } | ||
91 | } | ||
92 | |||
93 | |||
94 | /* | ||
95 | * No write back please | ||
96 | */ | ||
97 | void __flush_invalidate_region(void *start, int size) | ||
98 | { | ||
99 | unsigned long v; | ||
100 | unsigned long begin, end; | ||
101 | |||
102 | begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); | ||
103 | end = ((unsigned long)start + size + L1_CACHE_BYTES-1) | ||
104 | & ~(L1_CACHE_BYTES-1); | ||
105 | for (v = begin; v < end; v+=L1_CACHE_BYTES) { | ||
106 | asm volatile("ocbi %0" | ||
107 | : /* no output */ | ||
108 | : "m" (__m(v))); | ||
109 | } | ||
110 | } | ||
111 | |||
112 | static void __flush_dcache_all_ex(void) | ||
113 | { | ||
114 | unsigned long addr, end_addr, entry_offset; | ||
115 | |||
116 | end_addr = CACHE_OC_ADDRESS_ARRAY + (cpu_data->dcache.sets << cpu_data->dcache.entry_shift) * cpu_data->dcache.ways; | ||
117 | entry_offset = 1 << cpu_data->dcache.entry_shift; | ||
118 | for (addr = CACHE_OC_ADDRESS_ARRAY; addr < end_addr; addr += entry_offset) { | ||
119 | ctrl_outl(0, addr); | ||
120 | } | ||
121 | } | ||
122 | |||
123 | static void __flush_cache_4096_all_ex(unsigned long start) | ||
124 | { | ||
125 | unsigned long addr, entry_offset; | ||
126 | int i; | ||
127 | |||
128 | entry_offset = 1 << cpu_data->dcache.entry_shift; | ||
129 | for (i = 0; i < cpu_data->dcache.ways; i++, start += cpu_data->dcache.way_incr) { | ||
130 | for (addr = CACHE_OC_ADDRESS_ARRAY + start; | ||
131 | addr < CACHE_OC_ADDRESS_ARRAY + 4096 + start; | ||
132 | addr += entry_offset) { | ||
133 | ctrl_outl(0, addr); | ||
134 | } | ||
135 | } | ||
136 | } | ||
137 | |||
138 | void flush_cache_4096_all(unsigned long start) | ||
139 | { | ||
140 | if (cpu_data->dcache.ways == 1) | ||
141 | __flush_cache_4096_all(start); | ||
142 | else | ||
143 | __flush_cache_4096_all_ex(start); | ||
144 | } | ||
145 | |||
146 | /* | ||
147 | * Write back the range of D-cache, and purge the I-cache. | ||
148 | * | ||
150 | * Called from kernel/module.c:sys_init_module and the routine for the a.out format. | ||
150 | */ | ||
151 | void flush_icache_range(unsigned long start, unsigned long end) | ||
152 | { | ||
153 | flush_cache_all(); | ||
154 | } | ||
155 | |||
156 | /* | ||
157 | * Write back the D-cache and purge the I-cache for signal trampoline. | ||
158 | * .. which happens to be the same behavior as flush_icache_range(). | ||
159 | * So, we simply flush out a line. | ||
160 | */ | ||
161 | void flush_cache_sigtramp(unsigned long addr) | ||
162 | { | ||
163 | unsigned long v, index; | ||
164 | unsigned long flags; | ||
165 | int i; | ||
166 | |||
167 | v = addr & ~(L1_CACHE_BYTES-1); | ||
168 | asm volatile("ocbwb %0" | ||
169 | : /* no output */ | ||
170 | : "m" (__m(v))); | ||
171 | |||
172 | index = CACHE_IC_ADDRESS_ARRAY | (v & cpu_data->icache.entry_mask); | ||
173 | |||
174 | local_irq_save(flags); | ||
175 | jump_to_P2(); | ||
176 | for(i = 0; i < cpu_data->icache.ways; i++, index += cpu_data->icache.way_incr) | ||
177 | ctrl_outl(0, index); /* Clear out Valid-bit */ | ||
178 | back_to_P1(); | ||
179 | local_irq_restore(flags); | ||
180 | } | ||
181 | |||
182 | static inline void flush_cache_4096(unsigned long start, | ||
183 | unsigned long phys) | ||
184 | { | ||
185 | unsigned long flags; | ||
186 | extern void __flush_cache_4096(unsigned long addr, unsigned long phys, unsigned long exec_offset); | ||
187 | |||
188 | /* | ||
189 | * SH7751, SH7751R, and ST40 have no restrictions on cache handling. | ||
190 | * (SH7750, by contrast, must do this from the P2 area.) | ||
191 | */ | ||
192 | if ((cpu_data->flags & CPU_HAS_P2_FLUSH_BUG) | ||
193 | || start < CACHE_OC_ADDRESS_ARRAY) { | ||
194 | local_irq_save(flags); | ||
195 | __flush_cache_4096(start | SH_CACHE_ASSOC, P1SEGADDR(phys), 0x20000000); | ||
196 | local_irq_restore(flags); | ||
197 | } else { | ||
198 | __flush_cache_4096(start | SH_CACHE_ASSOC, P1SEGADDR(phys), 0); | ||
199 | } | ||
200 | } | ||
201 | |||
202 | /* | ||
203 | * Write back & invalidate the D-cache of the page. | ||
204 | * (To avoid "alias" issues) | ||
205 | */ | ||
206 | void flush_dcache_page(struct page *page) | ||
207 | { | ||
208 | if (test_bit(PG_mapped, &page->flags)) { | ||
209 | unsigned long phys = PHYSADDR(page_address(page)); | ||
210 | |||
211 | /* Loop all the D-cache */ | ||
212 | flush_cache_4096(CACHE_OC_ADDRESS_ARRAY, phys); | ||
213 | flush_cache_4096(CACHE_OC_ADDRESS_ARRAY | 0x1000, phys); | ||
214 | flush_cache_4096(CACHE_OC_ADDRESS_ARRAY | 0x2000, phys); | ||
215 | flush_cache_4096(CACHE_OC_ADDRESS_ARRAY | 0x3000, phys); | ||
216 | } | ||
217 | } | ||
218 | |||
219 | static inline void flush_icache_all(void) | ||
220 | { | ||
221 | unsigned long flags, ccr; | ||
222 | |||
223 | local_irq_save(flags); | ||
224 | jump_to_P2(); | ||
225 | |||
226 | /* Flush I-cache */ | ||
227 | ccr = ctrl_inl(CCR); | ||
228 | ccr |= CCR_CACHE_ICI; | ||
229 | ctrl_outl(ccr, CCR); | ||
230 | |||
231 | back_to_P1(); | ||
232 | local_irq_restore(flags); | ||
233 | } | ||
234 | |||
235 | void flush_cache_all(void) | ||
236 | { | ||
237 | if (cpu_data->dcache.ways == 1) | ||
238 | __flush_dcache_all(); | ||
239 | else | ||
240 | __flush_dcache_all_ex(); | ||
241 | flush_icache_all(); | ||
242 | } | ||
243 | |||
244 | void flush_cache_mm(struct mm_struct *mm) | ||
245 | { | ||
246 | /* Is there any good way? */ | ||
247 | /* XXX: possibly call flush_cache_range for each vm area */ | ||
248 | /* | ||
249 | * FIXME: Really, the optimal solution here would be to flush out | ||
250 | * individual lines created by the specified context, but this isn't | ||
251 | * feasible for a number of architectures (such as MIPS, and some | ||
252 | * SPARC) .. is this possible for SuperH? | ||
253 | * | ||
254 | * In the meantime, we'll just flush all of the caches.. this | ||
255 | * seems to be the simplest way to avoid at least a few wasted | ||
256 | * cache flushes. -Lethal | ||
257 | */ | ||
258 | flush_cache_all(); | ||
259 | } | ||
260 | |||
261 | /* | ||
262 | * Write back and invalidate I/D-caches for the page. | ||
263 | * | ||
264 | * ADDR: Virtual Address (U0 address) | ||
265 | * PFN: Physical page number | ||
266 | */ | ||
267 | void flush_cache_page(struct vm_area_struct *vma, unsigned long address, unsigned long pfn) | ||
268 | { | ||
269 | unsigned long phys = pfn << PAGE_SHIFT; | ||
270 | |||
271 | /* We only need to flush the D-cache when we have an alias */ | ||
272 | if ((address^phys) & CACHE_ALIAS) { | ||
273 | /* Loop 4K of the D-cache */ | ||
274 | flush_cache_4096( | ||
275 | CACHE_OC_ADDRESS_ARRAY | (address & CACHE_ALIAS), | ||
276 | phys); | ||
277 | /* Loop another 4K of the D-cache */ | ||
278 | flush_cache_4096( | ||
279 | CACHE_OC_ADDRESS_ARRAY | (phys & CACHE_ALIAS), | ||
280 | phys); | ||
281 | } | ||
282 | |||
283 | if (vma->vm_flags & VM_EXEC) | ||
284 | /* Loop 4K (half) of the I-cache */ | ||
285 | flush_cache_4096( | ||
286 | CACHE_IC_ADDRESS_ARRAY | (address & 0x1000), | ||
287 | phys); | ||
288 | } | ||
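The (address ^ phys) & CACHE_ALIAS test above is the heart of the alias handling: with 4K pages and a cache way larger than a page, the page "color" is carried in address bits above PAGE_SHIFT. A minimal sketch, assuming two color bits (i.e. CACHE_ALIAS == 0x3000, consistent with the four 0x1000-stride flushes in flush_dcache_page() above):

	#define EX_CACHE_ALIAS	0x3000UL	/* assumed: color bits 12-13 */

	/*
	 * Nonzero when the user mapping and the kernel P1 mapping of the
	 * same physical page land on different cache colors, i.e. when
	 * stale aliases are possible.
	 */
	static int ex_has_alias(unsigned long uaddr, unsigned long phys)
	{
		return ((uaddr ^ phys) & EX_CACHE_ALIAS) != 0;
	}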
289 | |||
290 | /* | ||
291 | * Write back and invalidate D-caches. | ||
292 | * | ||
293 | * START, END: Virtual Address (U0 address) | ||
294 | * | ||
295 | * NOTE: We need to flush the _physical_ page entry. | ||
296 | * Flushing the cache lines for U0 only isn't enough. | ||
297 | * We need to flush for P1 too, which may contain aliases. | ||
298 | */ | ||
299 | void flush_cache_range(struct vm_area_struct *vma, unsigned long start, | ||
300 | unsigned long end) | ||
301 | { | ||
302 | unsigned long p = start & PAGE_MASK; | ||
303 | pgd_t *dir; | ||
304 | pmd_t *pmd; | ||
305 | pte_t *pte; | ||
306 | pte_t entry; | ||
307 | unsigned long phys; | ||
308 | unsigned long d = 0; | ||
309 | |||
310 | dir = pgd_offset(vma->vm_mm, p); | ||
311 | pmd = pmd_offset(dir, p); | ||
312 | |||
313 | do { | ||
314 | if (pmd_none(*pmd) || pmd_bad(*pmd)) { | ||
315 | p &= ~((1 << PMD_SHIFT) -1); | ||
316 | p += (1 << PMD_SHIFT); | ||
317 | pmd++; | ||
318 | continue; | ||
319 | } | ||
320 | pte = pte_offset_kernel(pmd, p); | ||
321 | do { | ||
322 | entry = *pte; | ||
323 | if ((pte_val(entry) & _PAGE_PRESENT)) { | ||
324 | phys = pte_val(entry)&PTE_PHYS_MASK; | ||
325 | if ((p^phys) & CACHE_ALIAS) { | ||
326 | d |= 1 << ((p & CACHE_ALIAS)>>12); | ||
327 | d |= 1 << ((phys & CACHE_ALIAS)>>12); | ||
328 | if (d == 0x0f) | ||
329 | goto loop_exit; | ||
330 | } | ||
331 | } | ||
332 | pte++; | ||
333 | p += PAGE_SIZE; | ||
334 | } while (p < end && ((unsigned long)pte & ~PAGE_MASK)); | ||
335 | pmd++; | ||
336 | } while (p < end); | ||
337 | loop_exit: | ||
338 | if (d & 1) | ||
339 | flush_cache_4096_all(0); | ||
340 | if (d & 2) | ||
341 | flush_cache_4096_all(0x1000); | ||
342 | if (d & 4) | ||
343 | flush_cache_4096_all(0x2000); | ||
344 | if (d & 8) | ||
345 | flush_cache_4096_all(0x3000); | ||
346 | if (vma->vm_flags & VM_EXEC) | ||
347 | flush_icache_all(); | ||
348 | } | ||
349 | |||
350 | /* | ||
351 | * flush_icache_user_range | ||
352 | * @vma: VMA of the process | ||
353 | * @page: page | ||
354 | * @addr: U0 address | ||
355 | * @len: length of the range (< page size) | ||
356 | */ | ||
357 | void flush_icache_user_range(struct vm_area_struct *vma, | ||
358 | struct page *page, unsigned long addr, int len) | ||
359 | { | ||
360 | flush_cache_page(vma, addr, page_to_pfn(page)); | ||
361 | } | ||
362 | |||
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
new file mode 100644
index 000000000000..ad8ed7d41e16
--- /dev/null
+++ b/arch/sh/mm/cache-sh7705.c
@@ -0,0 +1,206 @@
1 | /* | ||
2 | * arch/sh/mm/cache-sh7705.c | ||
3 | * | ||
4 | * Copyright (C) 1999, 2000 Niibe Yutaka | ||
5 | * Copyright (C) 2004 Alex Song | ||
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | #include <linux/init.h> | ||
14 | #include <linux/mman.h> | ||
15 | #include <linux/mm.h> | ||
16 | #include <linux/threads.h> | ||
17 | #include <asm/addrspace.h> | ||
18 | #include <asm/page.h> | ||
19 | #include <asm/pgtable.h> | ||
20 | #include <asm/processor.h> | ||
21 | #include <asm/cache.h> | ||
22 | #include <asm/io.h> | ||
23 | #include <asm/uaccess.h> | ||
24 | #include <asm/pgalloc.h> | ||
25 | #include <asm/mmu_context.h> | ||
26 | #include <asm/cacheflush.h> | ||
27 | |||
28 | /* The 32KB cache on the SH7705 suffers from the same synonym problem | ||
29 | * as SH-4 CPUs do. */ | ||
30 | |||
31 | #define __pte_offset(address) \ | ||
32 | ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) | ||
33 | #define pte_offset(dir, address) ((pte_t *) pmd_page_kernel(*(dir)) + \ | ||
34 | __pte_offset(address)) | ||
35 | |||
36 | static inline void cache_wback_all(void) | ||
37 | { | ||
38 | unsigned long ways, waysize, addrstart; | ||
39 | |||
40 | ways = cpu_data->dcache.ways; | ||
41 | waysize = cpu_data->dcache.sets; | ||
42 | waysize <<= cpu_data->dcache.entry_shift; | ||
43 | |||
44 | addrstart = CACHE_OC_ADDRESS_ARRAY; | ||
45 | |||
46 | do { | ||
47 | unsigned long addr; | ||
48 | |||
49 | for (addr = addrstart; | ||
50 | addr < addrstart + waysize; | ||
51 | addr += cpu_data->dcache.linesz) { | ||
52 | unsigned long data; | ||
53 | int v = SH_CACHE_UPDATED | SH_CACHE_VALID; | ||
54 | |||
55 | data = ctrl_inl(addr); | ||
56 | |||
57 | if ((data & v) == v) | ||
58 | ctrl_outl(data & ~v, addr); | ||
59 | |||
60 | } | ||
61 | |||
62 | addrstart += cpu_data->dcache.way_incr; | ||
63 | } while (--ways); | ||
64 | } | ||
65 | |||
66 | /* | ||
67 | * Write back the range of D-cache, and purge the I-cache. | ||
68 | * | ||
69 | * Called from kernel/module.c:sys_init_module and the routine for the a.out format. | ||
70 | */ | ||
71 | void flush_icache_range(unsigned long start, unsigned long end) | ||
72 | { | ||
73 | __flush_wback_region((void *)start, end - start); | ||
74 | } | ||
75 | |||
76 | |||
77 | /* | ||
78 | * Writeback&Invalidate the D-cache of the page | ||
79 | */ | ||
80 | static void __flush_dcache_page(unsigned long phys) | ||
81 | { | ||
82 | unsigned long ways, waysize, addrstart; | ||
83 | unsigned long flags; | ||
84 | |||
85 | phys |= SH_CACHE_VALID; | ||
86 | |||
87 | /* | ||
88 | * Here, phys is the physical address of the page. We check all the | ||
89 | * tags in the cache for those with the same page number as this page | ||
90 | * (by masking off the lowest 2 bits of the 19-bit tag; these bits are | ||
91 | * derived from the offset within the 4K page). Matching valid | ||
92 | * entries are invalidated. | ||
93 | * | ||
94 | * Since 2 bits of the cache index are derived from the virtual page | ||
95 | * number, knowing this would reduce the number of cache entries to be | ||
96 | * searched by a factor of 4. However this function exists to deal with | ||
97 | * potential cache aliasing, therefore the optimisation is probably not | ||
98 | * possible. | ||
99 | */ | ||
100 | local_irq_save(flags); | ||
101 | jump_to_P2(); | ||
102 | |||
103 | ways = cpu_data->dcache.ways; | ||
104 | waysize = cpu_data->dcache.sets; | ||
105 | waysize <<= cpu_data->dcache.entry_shift; | ||
106 | |||
107 | addrstart = CACHE_OC_ADDRESS_ARRAY; | ||
108 | |||
109 | do { | ||
110 | unsigned long addr; | ||
111 | |||
112 | for (addr = addrstart; | ||
113 | addr < addrstart + waysize; | ||
114 | addr += cpu_data->dcache.linesz) { | ||
115 | unsigned long data; | ||
116 | |||
117 | data = ctrl_inl(addr) & (0x1ffffC00 | SH_CACHE_VALID); | ||
118 | if (data == phys) { | ||
119 | data &= ~(SH_CACHE_VALID | SH_CACHE_UPDATED); | ||
120 | ctrl_outl(data, addr); | ||
121 | } | ||
122 | } | ||
123 | |||
124 | addrstart += cpu_data->dcache.way_incr; | ||
125 | } while (--ways); | ||
126 | |||
127 | back_to_P1(); | ||
128 | local_irq_restore(flags); | ||
129 | } | ||
130 | |||
131 | |||
132 | /* | ||
133 | * Write back & invalidate the D-cache of the page. | ||
134 | * (To avoid "alias" issues) | ||
135 | */ | ||
136 | void flush_dcache_page(struct page *page) | ||
137 | { | ||
138 | if (test_bit(PG_mapped, &page->flags)) | ||
139 | __flush_dcache_page(PHYSADDR(page_address(page))); | ||
140 | } | ||
141 | |||
142 | void flush_cache_all(void) | ||
143 | { | ||
144 | unsigned long flags; | ||
145 | |||
146 | local_irq_save(flags); | ||
147 | jump_to_P2(); | ||
148 | |||
149 | cache_wback_all(); | ||
150 | back_to_P1(); | ||
151 | local_irq_restore(flags); | ||
152 | } | ||
153 | |||
154 | void flush_cache_mm(struct mm_struct *mm) | ||
155 | { | ||
156 | /* Is there any good way? */ | ||
157 | /* XXX: possibly call flush_cache_range for each vm area */ | ||
158 | flush_cache_all(); | ||
159 | } | ||
160 | |||
161 | /* | ||
162 | * Write back and invalidate D-caches. | ||
163 | * | ||
164 | * START, END: Virtual Address (U0 address) | ||
165 | * | ||
166 | * NOTE: We need to flush the _physical_ page entry. | ||
167 | * Flushing the cache lines for U0 only isn't enough. | ||
168 | * We need to flush for P1 too, which may contain aliases. | ||
169 | */ | ||
170 | void flush_cache_range(struct vm_area_struct *vma, unsigned long start, | ||
171 | unsigned long end) | ||
172 | { | ||
173 | |||
174 | /* | ||
175 | * We could call flush_cache_page for the pages of this range, | ||
176 | * but it's not efficient (it would scan the caches all the time...). | ||
177 | * | ||
178 | * We can't use the A-bit magic, as there are cases where we don't | ||
179 | * have a valid entry in the TLB. | ||
180 | */ | ||
181 | flush_cache_all(); | ||
182 | } | ||
183 | |||
184 | /* | ||
185 | * Write back and invalidate I/D-caches for the page. | ||
186 | * | ||
187 | * ADDRESS: Virtual Address (U0 address) | ||
188 | */ | ||
189 | void flush_cache_page(struct vm_area_struct *vma, unsigned long address, unsigned long pfn) | ||
190 | { | ||
191 | __flush_dcache_page(pfn << PAGE_SHIFT); | ||
192 | } | ||
193 | |||
194 | /* | ||
195 | * This is called when a page-cache page is about to be mapped into a | ||
196 | * user process' address space. It offers an opportunity for a | ||
197 | * port to ensure d-cache/i-cache coherency if necessary. | ||
198 | * | ||
199 | * Not entirely sure why this is necessary on SH-3 with a 32K cache, but | ||
200 | * without it we get occasional "Memory fault" errors when loading a program. | ||
201 | */ | ||
202 | void flush_icache_page(struct vm_area_struct *vma, struct page *page) | ||
203 | { | ||
204 | __flush_purge_region(page_address(page), PAGE_SIZE); | ||
205 | } | ||
206 | |||
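For reference, the tag comparison performed by __flush_dcache_page() above can be written as a one-line predicate. The 0x1ffffC00 mask mirrors the code; treating SH_CACHE_VALID as bit 0 is an assumption made purely for the sketch:

	#define EX_TAG_MASK	0x1ffffc00UL	/* physical tag bits, as above */
	#define EX_VALID	0x00000001UL	/* assumed SH_CACHE_VALID bit */

	/*
	 * True when a (masked) cache tag matches the target physical page
	 * and the entry is valid; such entries are written back and
	 * invalidated by the loop above.
	 */
	static int ex_tag_matches(unsigned long tag, unsigned long phys)
	{
		return (tag & (EX_TAG_MASK | EX_VALID)) ==
		       ((phys & EX_TAG_MASK) | EX_VALID);
	}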
diff --git a/arch/sh/mm/clear_page.S b/arch/sh/mm/clear_page.S
new file mode 100644
index 000000000000..ae58a61f0e66
--- /dev/null
+++ b/arch/sh/mm/clear_page.S
@@ -0,0 +1,295 @@
1 | /* $Id: clear_page.S,v 1.13 2003/08/25 17:03:10 lethal Exp $ | ||
2 | * | ||
3 | * __clear_user_page, __clear_user, clear_page implementations for SuperH | ||
4 | * | ||
5 | * Copyright (C) 2001 Kaz Kojima | ||
6 | * Copyright (C) 2001, 2002 Niibe Yutaka | ||
7 | * | ||
8 | */ | ||
9 | #include <linux/config.h> | ||
10 | #include <linux/linkage.h> | ||
11 | |||
12 | /* | ||
13 | * clear_page_slow | ||
14 | * @to: P1 address | ||
15 | * | ||
16 | * void clear_page_slow(void *to) | ||
17 | */ | ||
18 | |||
19 | /* | ||
20 | * r0 --- scratch | ||
21 | * r4 --- to | ||
22 | * r5 --- to + 4096 | ||
23 | */ | ||
24 | ENTRY(clear_page_slow) | ||
25 | mov r4,r5 | ||
26 | mov.w .Llimit,r0 | ||
27 | add r0,r5 | ||
28 | mov #0,r0 | ||
29 | ! | ||
30 | 1: | ||
31 | #if defined(CONFIG_CPU_SH3) | ||
32 | mov.l r0,@r4 | ||
33 | #elif defined(CONFIG_CPU_SH4) | ||
34 | movca.l r0,@r4 | ||
35 | mov r4,r1 | ||
36 | #endif | ||
37 | add #32,r4 | ||
38 | mov.l r0,@-r4 | ||
39 | mov.l r0,@-r4 | ||
40 | mov.l r0,@-r4 | ||
41 | mov.l r0,@-r4 | ||
42 | mov.l r0,@-r4 | ||
43 | mov.l r0,@-r4 | ||
44 | mov.l r0,@-r4 | ||
45 | #if defined(CONFIG_CPU_SH4) | ||
46 | ocbwb @r1 | ||
47 | #endif | ||
48 | cmp/eq r5,r4 | ||
49 | bf/s 1b | ||
50 | add #28,r4 | ||
51 | ! | ||
52 | rts | ||
53 | nop | ||
54 | .Llimit: .word (4096-28) | ||
55 | |||
56 | ENTRY(__clear_user) | ||
57 | ! | ||
58 | mov #0, r0 | ||
59 | mov #0xe0, r1 ! 0xffffffe0 | ||
60 | ! | ||
61 | ! r4..(r4+31)&~31 -------- not aligned [ Area 0 ] | ||
62 | ! (r4+31)&~31..(r4+r5)&~31 -------- aligned [ Area 1 ] | ||
63 | ! (r4+r5)&~31..r4+r5 -------- not aligned [ Area 2 ] | ||
64 | ! | ||
65 | ! Clear area 0 | ||
66 | mov r4, r2 | ||
67 | ! | ||
68 | tst r1, r5 ! length < 32 | ||
69 | bt .Larea2 ! skip to remainder | ||
70 | ! | ||
71 | add #31, r2 | ||
72 | and r1, r2 | ||
73 | cmp/eq r4, r2 | ||
74 | bt .Larea1 | ||
75 | mov r2, r3 | ||
76 | sub r4, r3 | ||
77 | mov r3, r7 | ||
78 | mov r4, r2 | ||
79 | ! | ||
80 | .L0: dt r3 | ||
81 | 0: mov.b r0, @r2 | ||
82 | bf/s .L0 | ||
83 | add #1, r2 | ||
84 | ! | ||
85 | sub r7, r5 | ||
86 | mov r2, r4 | ||
87 | .Larea1: | ||
88 | mov r4, r3 | ||
89 | add r5, r3 | ||
90 | and r1, r3 | ||
91 | cmp/hi r2, r3 | ||
92 | bf .Larea2 | ||
93 | ! | ||
94 | ! Clear area 1 | ||
95 | #if defined(CONFIG_CPU_SH4) | ||
96 | 1: movca.l r0, @r2 | ||
97 | #else | ||
98 | 1: mov.l r0, @r2 | ||
99 | #endif | ||
100 | add #4, r2 | ||
101 | 2: mov.l r0, @r2 | ||
102 | add #4, r2 | ||
103 | 3: mov.l r0, @r2 | ||
104 | add #4, r2 | ||
105 | 4: mov.l r0, @r2 | ||
106 | add #4, r2 | ||
107 | 5: mov.l r0, @r2 | ||
108 | add #4, r2 | ||
109 | 6: mov.l r0, @r2 | ||
110 | add #4, r2 | ||
111 | 7: mov.l r0, @r2 | ||
112 | add #4, r2 | ||
113 | 8: mov.l r0, @r2 | ||
114 | add #4, r2 | ||
115 | cmp/hi r2, r3 | ||
116 | bt/s 1b | ||
117 | nop | ||
118 | ! | ||
119 | ! Clear area 2 | ||
120 | .Larea2: | ||
121 | mov r4, r3 | ||
122 | add r5, r3 | ||
123 | cmp/hs r3, r2 | ||
124 | bt/s .Ldone | ||
125 | sub r2, r3 | ||
126 | .L2: dt r3 | ||
127 | 9: mov.b r0, @r2 | ||
128 | bf/s .L2 | ||
129 | add #1, r2 | ||
130 | ! | ||
131 | .Ldone: rts | ||
132 | mov #0, r0 ! return 0 as normal return | ||
133 | |||
134 | ! return the number of bytes remaining | ||
135 | .Lbad_clear_user: | ||
136 | mov r4, r0 | ||
137 | add r5, r0 | ||
138 | rts | ||
139 | sub r2, r0 | ||
140 | |||
141 | .section __ex_table,"a" | ||
142 | .align 2 | ||
143 | .long 0b, .Lbad_clear_user | ||
144 | .long 1b, .Lbad_clear_user | ||
145 | .long 2b, .Lbad_clear_user | ||
146 | .long 3b, .Lbad_clear_user | ||
147 | .long 4b, .Lbad_clear_user | ||
148 | .long 5b, .Lbad_clear_user | ||
149 | .long 6b, .Lbad_clear_user | ||
150 | .long 7b, .Lbad_clear_user | ||
151 | .long 8b, .Lbad_clear_user | ||
152 | .long 9b, .Lbad_clear_user | ||
153 | .previous | ||
154 | |||
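In C terms, the .Lbad_clear_user fixup above reports how much of the destination was left untouched when a fault hit; a sketch of the same arithmetic:

	/*
	 * Sketch of .Lbad_clear_user: r4 = start, r5 = length, r2 = current
	 * destination pointer at the time of the fault.  The return value
	 * is the number of bytes NOT cleared.
	 */
	static unsigned long ex_bytes_remaining(unsigned long start,
						unsigned long len,
						unsigned long cur)
	{
		return (start + len) - cur;
	}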
155 | #if defined(CONFIG_CPU_SH4) | ||
156 | /* | ||
157 | * __clear_user_page | ||
158 | * @to: P3 address (with same color) | ||
159 | * @orig_to: P1 address | ||
160 | * | ||
161 | * void __clear_user_page(void *to, void *orig_to) | ||
162 | */ | ||
163 | |||
164 | /* | ||
165 | * r0 --- scratch | ||
166 | * r4 --- to | ||
167 | * r5 --- orig_to | ||
168 | * r6 --- to + 4096 | ||
169 | */ | ||
170 | ENTRY(__clear_user_page) | ||
171 | mov.w .L4096,r0 | ||
172 | mov r4,r6 | ||
173 | add r0,r6 | ||
174 | mov #0,r0 | ||
175 | ! | ||
176 | 1: ocbi @r5 | ||
177 | add #32,r5 | ||
178 | movca.l r0,@r4 | ||
179 | mov r4,r1 | ||
180 | add #32,r4 | ||
181 | mov.l r0,@-r4 | ||
182 | mov.l r0,@-r4 | ||
183 | mov.l r0,@-r4 | ||
184 | mov.l r0,@-r4 | ||
185 | mov.l r0,@-r4 | ||
186 | mov.l r0,@-r4 | ||
187 | mov.l r0,@-r4 | ||
188 | add #28,r4 | ||
189 | cmp/eq r6,r4 | ||
190 | bf/s 1b | ||
191 | ocbwb @r1 | ||
192 | ! | ||
193 | rts | ||
194 | nop | ||
195 | .L4096: .word 4096 | ||
196 | |||
197 | ENTRY(__flush_cache_4096) | ||
198 | mov.l 1f,r3 | ||
199 | add r6,r3 | ||
200 | mov r4,r0 | ||
201 | mov #64,r2 | ||
202 | shll r2 | ||
203 | mov #64,r6 | ||
204 | jmp @r3 | ||
205 | mov #96,r7 | ||
206 | .align 2 | ||
207 | 1: .long 2f | ||
208 | 2: | ||
209 | .rept 32 | ||
210 | mov.l r5,@r0 | ||
211 | mov.l r5,@(32,r0) | ||
212 | mov.l r5,@(r0,r6) | ||
213 | mov.l r5,@(r0,r7) | ||
214 | add r2,r5 | ||
215 | add r2,r0 | ||
216 | .endr | ||
217 | nop | ||
218 | nop | ||
219 | nop | ||
220 | nop | ||
221 | nop | ||
222 | nop | ||
223 | nop | ||
224 | rts | ||
225 | nop | ||
226 | |||
227 | ENTRY(__flush_dcache_all) | ||
228 | mov.l 2f,r0 | ||
229 | mov.l 3f,r4 | ||
230 | and r0,r4 ! r4 = (unsigned long)&empty_zero_page[0] & ~0xffffc000 | ||
231 | stc sr,r1 ! save SR | ||
232 | mov.l 4f,r2 | ||
233 | or r1,r2 | ||
234 | mov #32,r3 | ||
235 | shll2 r3 | ||
236 | 1: | ||
237 | ldc r2,sr ! set BL bit | ||
238 | movca.l r0,@r4 | ||
239 | ocbi @r4 | ||
240 | add #32,r4 | ||
241 | movca.l r0,@r4 | ||
242 | ocbi @r4 | ||
243 | add #32,r4 | ||
244 | movca.l r0,@r4 | ||
245 | ocbi @r4 | ||
246 | add #32,r4 | ||
247 | movca.l r0,@r4 | ||
248 | ocbi @r4 | ||
249 | ldc r1,sr ! restore SR | ||
250 | dt r3 | ||
251 | bf/s 1b | ||
252 | add #32,r4 | ||
253 | |||
254 | rts | ||
255 | nop | ||
256 | .align 2 | ||
257 | 2: .long 0xffffc000 | ||
258 | 3: .long empty_zero_page | ||
259 | 4: .long 0x10000000 ! BL bit | ||
260 | |||
261 | /* __flush_cache_4096_all(unsigned long addr) */ | ||
262 | ENTRY(__flush_cache_4096_all) | ||
263 | mov.l 2f,r0 | ||
264 | mov.l 3f,r2 | ||
265 | and r0,r2 | ||
266 | or r2,r4 ! r4 = addr | (unsigned long)&empty_zero_page[0] & ~0x3fff | ||
267 | stc sr,r1 ! save SR | ||
268 | mov.l 4f,r2 | ||
269 | or r1,r2 | ||
270 | mov #32,r3 | ||
271 | 1: | ||
272 | ldc r2,sr ! set BL bit | ||
273 | movca.l r0,@r4 | ||
274 | ocbi @r4 | ||
275 | add #32,r4 | ||
276 | movca.l r0,@r4 | ||
277 | ocbi @r4 | ||
278 | add #32,r4 | ||
279 | movca.l r0,@r4 | ||
280 | ocbi @r4 | ||
281 | add #32,r4 | ||
282 | movca.l r0,@r4 | ||
283 | ocbi @r4 | ||
284 | ldc r1,sr ! restore SR | ||
285 | dt r3 | ||
286 | bf/s 1b | ||
287 | add #32,r4 | ||
288 | |||
289 | rts | ||
290 | nop | ||
291 | .align 2 | ||
292 | 2: .long 0xffffc000 | ||
293 | 3: .long empty_zero_page | ||
294 | 4: .long 0x10000000 ! BL bit | ||
295 | #endif | ||
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
new file mode 100644
index 000000000000..1f7af0c73cf4
--- /dev/null
+++ b/arch/sh/mm/consistent.c
@@ -0,0 +1,85 @@
1 | /* | ||
2 | * arch/sh/mm/consistent.c | ||
3 | * | ||
4 | * Copyright (C) 2004 Paul Mundt | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | #include <linux/mm.h> | ||
11 | #include <linux/dma-mapping.h> | ||
12 | #include <asm/io.h> | ||
13 | |||
14 | void *consistent_alloc(int gfp, size_t size, dma_addr_t *handle) | ||
15 | { | ||
16 | struct page *page, *end, *free; | ||
17 | void *ret; | ||
18 | int order; | ||
19 | |||
20 | size = PAGE_ALIGN(size); | ||
21 | order = get_order(size); | ||
22 | |||
23 | page = alloc_pages(gfp, order); | ||
24 | if (!page) | ||
25 | return NULL; | ||
26 | |||
27 | ret = page_address(page); | ||
28 | *handle = virt_to_phys(ret); | ||
29 | |||
30 | /* | ||
31 | * We must flush the cache before we pass it on to the device | ||
32 | */ | ||
33 | dma_cache_wback_inv(ret, size); | ||
34 | |||
35 | page = virt_to_page(ret); | ||
36 | free = page + (size >> PAGE_SHIFT); | ||
37 | end = page + (1 << order); | ||
38 | |||
39 | while (++page < end) { | ||
40 | set_page_count(page, 1); | ||
41 | |||
42 | /* Free any unused pages */ | ||
43 | if (page >= free) { | ||
44 | __free_page(page); | ||
45 | } | ||
46 | } | ||
47 | |||
48 | return P2SEGADDR(ret); | ||
49 | } | ||
50 | |||
51 | void consistent_free(void *vaddr, size_t size) | ||
52 | { | ||
53 | unsigned long addr = P1SEGADDR((unsigned long)vaddr); | ||
54 | struct page *page = virt_to_page(addr); | ||
55 | int num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; | ||
56 | int i; | ||
57 | |||
58 | for (i = 0; i < num_pages; i++) { | ||
59 | __free_page(page + i); | ||
60 | } | ||
61 | } | ||
62 | |||
63 | void consistent_sync(void *vaddr, size_t size, int direction) | ||
64 | { | ||
65 | void *p1addr = (void *)P1SEGADDR((unsigned long)vaddr); | ||
66 | |||
67 | switch (direction) { | ||
68 | case DMA_FROM_DEVICE: /* invalidate only */ | ||
69 | dma_cache_inv(p1addr, size); | ||
70 | break; | ||
71 | case DMA_TO_DEVICE: /* writeback only */ | ||
72 | dma_cache_wback(p1addr, size); | ||
73 | break; | ||
74 | case DMA_BIDIRECTIONAL: /* writeback and invalidate */ | ||
75 | dma_cache_wback_inv(p1addr, size); | ||
76 | break; | ||
77 | default: | ||
78 | BUG(); | ||
79 | } | ||
80 | } | ||
81 | |||
82 | EXPORT_SYMBOL(consistent_alloc); | ||
83 | EXPORT_SYMBOL(consistent_free); | ||
84 | EXPORT_SYMBOL(consistent_sync); | ||
85 | |||
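A hypothetical usage sketch for the API above (the PAGE_SIZE buffer and the GFP_KERNEL choice are illustrative; consistent_sync() would instead be used on a normally-allocated streaming buffer before/after DMA):

	static void ex_dma_buffer(void)
	{
		dma_addr_t dma;
		void *buf = consistent_alloc(GFP_KERNEL, PAGE_SIZE, &dma);

		if (buf == NULL)
			return;

		/*
		 * 'buf' is an uncached (P2) alias of the pages, so CPU
		 * stores are immediately visible to the device; 'dma' is
		 * the physical address to program into it.
		 */

		consistent_free(buf, PAGE_SIZE);
	}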
diff --git a/arch/sh/mm/copy_page.S b/arch/sh/mm/copy_page.S
new file mode 100644
index 000000000000..1addffe117c3
--- /dev/null
+++ b/arch/sh/mm/copy_page.S
@@ -0,0 +1,397 @@
1 | /* $Id: copy_page.S,v 1.8 2003/08/25 17:03:10 lethal Exp $ | ||
2 | * | ||
3 | * copy_page, __copy_user_page, __copy_user implementations for SuperH | ||
4 | * | ||
5 | * Copyright (C) 2001 Niibe Yutaka & Kaz Kojima | ||
6 | * Copyright (C) 2002 Toshinobu Sugioka | ||
7 | * | ||
8 | */ | ||
9 | #include <linux/linkage.h> | ||
10 | |||
11 | /* | ||
12 | * copy_page_slow | ||
13 | * @to: P1 address | ||
14 | * @from: P1 address | ||
15 | * | ||
16 | * void copy_page_slow(void *to, void *from) | ||
17 | */ | ||
18 | |||
19 | /* | ||
20 | * r0, r1, r2, r3, r4, r5, r6, r7 --- scratch | ||
21 | * r8 --- from + 4096 | ||
22 | * r9 --- not used | ||
23 | * r10 --- to | ||
24 | * r11 --- from | ||
25 | */ | ||
26 | ENTRY(copy_page_slow) | ||
27 | mov.l r8,@-r15 | ||
28 | mov.l r10,@-r15 | ||
29 | mov.l r11,@-r15 | ||
30 | mov r4,r10 | ||
31 | mov r5,r11 | ||
32 | mov r5,r8 | ||
33 | mov.w .L4096,r0 | ||
34 | add r0,r8 | ||
35 | ! | ||
36 | 1: mov.l @r11+,r0 | ||
37 | mov.l @r11+,r1 | ||
38 | mov.l @r11+,r2 | ||
39 | mov.l @r11+,r3 | ||
40 | mov.l @r11+,r4 | ||
41 | mov.l @r11+,r5 | ||
42 | mov.l @r11+,r6 | ||
43 | mov.l @r11+,r7 | ||
44 | #if defined(CONFIG_CPU_SH3) | ||
45 | mov.l r0,@r10 | ||
46 | #elif defined(CONFIG_CPU_SH4) | ||
47 | movca.l r0,@r10 | ||
48 | mov r10,r0 | ||
49 | #endif | ||
50 | add #32,r10 | ||
51 | mov.l r7,@-r10 | ||
52 | mov.l r6,@-r10 | ||
53 | mov.l r5,@-r10 | ||
54 | mov.l r4,@-r10 | ||
55 | mov.l r3,@-r10 | ||
56 | mov.l r2,@-r10 | ||
57 | mov.l r1,@-r10 | ||
58 | #if defined(CONFIG_CPU_SH4) | ||
59 | ocbwb @r0 | ||
60 | #endif | ||
61 | cmp/eq r11,r8 | ||
62 | bf/s 1b | ||
63 | add #28,r10 | ||
64 | ! | ||
65 | mov.l @r15+,r11 | ||
66 | mov.l @r15+,r10 | ||
67 | mov.l @r15+,r8 | ||
68 | rts | ||
69 | nop | ||
70 | |||
71 | #if defined(CONFIG_CPU_SH4) | ||
72 | /* | ||
73 | * __copy_user_page | ||
74 | * @to: P1 address (with same color) | ||
75 | * @from: P1 address | ||
76 | * @orig_to: P1 address | ||
77 | * | ||
78 | * void __copy_user_page(void *to, void *from, void *orig_to) | ||
79 | */ | ||
80 | |||
81 | /* | ||
82 | * r0, r1, r2, r3, r4, r5, r6, r7 --- scratch | ||
83 | * r8 --- from + 4096 | ||
84 | * r9 --- orig_to | ||
85 | * r10 --- to | ||
86 | * r11 --- from | ||
87 | */ | ||
88 | ENTRY(__copy_user_page) | ||
89 | mov.l r8,@-r15 | ||
90 | mov.l r9,@-r15 | ||
91 | mov.l r10,@-r15 | ||
92 | mov.l r11,@-r15 | ||
93 | mov r4,r10 | ||
94 | mov r5,r11 | ||
95 | mov r6,r9 | ||
96 | mov r5,r8 | ||
97 | mov.w .L4096,r0 | ||
98 | add r0,r8 | ||
99 | ! | ||
100 | 1: ocbi @r9 | ||
101 | add #32,r9 | ||
102 | mov.l @r11+,r0 | ||
103 | mov.l @r11+,r1 | ||
104 | mov.l @r11+,r2 | ||
105 | mov.l @r11+,r3 | ||
106 | mov.l @r11+,r4 | ||
107 | mov.l @r11+,r5 | ||
108 | mov.l @r11+,r6 | ||
109 | mov.l @r11+,r7 | ||
110 | movca.l r0,@r10 | ||
111 | mov r10,r0 | ||
112 | add #32,r10 | ||
113 | mov.l r7,@-r10 | ||
114 | mov.l r6,@-r10 | ||
115 | mov.l r5,@-r10 | ||
116 | mov.l r4,@-r10 | ||
117 | mov.l r3,@-r10 | ||
118 | mov.l r2,@-r10 | ||
119 | mov.l r1,@-r10 | ||
120 | ocbwb @r0 | ||
121 | cmp/eq r11,r8 | ||
122 | bf/s 1b | ||
123 | add #28,r10 | ||
124 | ! | ||
125 | mov.l @r15+,r11 | ||
126 | mov.l @r15+,r10 | ||
127 | mov.l @r15+,r9 | ||
128 | mov.l @r15+,r8 | ||
129 | rts | ||
130 | nop | ||
131 | #endif | ||
132 | .L4096: .word 4096 | ||
133 | /* | ||
134 | * __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n); | ||
135 | * Return the number of bytes NOT copied | ||
136 | */ | ||
137 | #define EX(...) \ | ||
138 | 9999: __VA_ARGS__ ; \ | ||
139 | .section __ex_table, "a"; \ | ||
140 | .long 9999b, 6000f ; \ | ||
141 | .previous | ||
142 | ENTRY(__copy_user) | ||
143 | tst r6,r6 ! Check explicitly for zero | ||
144 | bf 1f | ||
145 | rts | ||
146 | mov #0,r0 ! normal return | ||
147 | 1: | ||
148 | mov.l r10,@-r15 | ||
149 | mov.l r9,@-r15 | ||
150 | mov.l r8,@-r15 | ||
151 | mov r4,r3 | ||
152 | add r6,r3 ! last destination address | ||
153 | mov #12,r0 ! Check if small number of bytes | ||
154 | cmp/gt r0,r6 | ||
155 | bt 2f | ||
156 | bra .L_cleanup_loop | ||
157 | nop | ||
158 | 2: | ||
159 | neg r5,r0 ! Calculate bytes needed to align source | ||
160 | add #4,r0 | ||
161 | and #3,r0 | ||
162 | tst r0,r0 | ||
163 | bt .L_jump | ||
164 | mov r0,r1 | ||
165 | |||
166 | .L_loop1: | ||
167 | ! Copy bytes to align source | ||
168 | EX( mov.b @r5+,r0 ) | ||
169 | dt r1 | ||
170 | EX( mov.b r0,@r4 ) | ||
171 | add #-1,r6 | ||
172 | bf/s .L_loop1 | ||
173 | add #1,r4 | ||
174 | |||
175 | .L_jump: | ||
176 | mov r6,r2 ! Calculate number of longwords to copy | ||
177 | shlr2 r2 | ||
178 | tst r2,r2 | ||
179 | bt .L_cleanup | ||
180 | |||
181 | mov r4,r0 ! Jump to appropriate routine | ||
182 | and #3,r0 | ||
183 | mov r0,r1 | ||
184 | shll2 r1 | ||
185 | mova .L_jump_tbl,r0 | ||
186 | mov.l @(r0,r1),r1 | ||
187 | jmp @r1 | ||
188 | nop | ||
189 | |||
190 | .align 2 | ||
191 | .L_jump_tbl: | ||
192 | .long .L_dest00 | ||
193 | .long .L_dest01 | ||
194 | .long .L_dest10 | ||
195 | .long .L_dest11 | ||
196 | |||
197 | ! Destination = 00 | ||
198 | |||
199 | .L_dest00: | ||
200 | mov r2,r7 | ||
201 | shlr2 r7 | ||
202 | shlr r7 | ||
203 | tst r7,r7 | ||
204 | mov #7,r0 | ||
205 | bt/s 1f | ||
206 | and r0,r2 | ||
207 | .align 2 | ||
208 | 2: | ||
209 | EX( mov.l @r5+,r0 ) | ||
210 | EX( mov.l @r5+,r8 ) | ||
211 | EX( mov.l @r5+,r9 ) | ||
212 | EX( mov.l @r5+,r10 ) | ||
213 | EX( mov.l r0,@r4 ) | ||
214 | EX( mov.l r8,@(4,r4) ) | ||
215 | EX( mov.l r9,@(8,r4) ) | ||
216 | EX( mov.l r10,@(12,r4) ) | ||
217 | EX( mov.l @r5+,r0 ) | ||
218 | EX( mov.l @r5+,r8 ) | ||
219 | EX( mov.l @r5+,r9 ) | ||
220 | EX( mov.l @r5+,r10 ) | ||
221 | dt r7 | ||
222 | EX( mov.l r0,@(16,r4) ) | ||
223 | EX( mov.l r8,@(20,r4) ) | ||
224 | EX( mov.l r9,@(24,r4) ) | ||
225 | EX( mov.l r10,@(28,r4) ) | ||
226 | bf/s 2b | ||
227 | add #32,r4 | ||
228 | tst r2,r2 | ||
229 | bt .L_cleanup | ||
230 | 1: | ||
231 | EX( mov.l @r5+,r0 ) | ||
232 | dt r2 | ||
233 | EX( mov.l r0,@r4 ) | ||
234 | bf/s 1b | ||
235 | add #4,r4 | ||
236 | |||
237 | bra .L_cleanup | ||
238 | nop | ||
239 | |||
240 | ! Destination = 10 | ||
241 | |||
242 | .L_dest10: | ||
243 | mov r2,r7 | ||
244 | shlr2 r7 | ||
245 | shlr r7 | ||
246 | tst r7,r7 | ||
247 | mov #7,r0 | ||
248 | bt/s 1f | ||
249 | and r0,r2 | ||
250 | 2: | ||
251 | dt r7 | ||
252 | #ifdef __LITTLE_ENDIAN__ | ||
253 | EX( mov.l @r5+,r0 ) | ||
254 | EX( mov.l @r5+,r1 ) | ||
255 | EX( mov.l @r5+,r8 ) | ||
256 | EX( mov.l @r5+,r9 ) | ||
257 | EX( mov.l @r5+,r10 ) | ||
258 | EX( mov.w r0,@r4 ) | ||
259 | add #2,r4 | ||
260 | xtrct r1,r0 | ||
261 | xtrct r8,r1 | ||
262 | xtrct r9,r8 | ||
263 | xtrct r10,r9 | ||
264 | |||
265 | EX( mov.l r0,@r4 ) | ||
266 | EX( mov.l r1,@(4,r4) ) | ||
267 | EX( mov.l r8,@(8,r4) ) | ||
268 | EX( mov.l r9,@(12,r4) ) | ||
269 | |||
270 | EX( mov.l @r5+,r1 ) | ||
271 | EX( mov.l @r5+,r8 ) | ||
272 | EX( mov.l @r5+,r0 ) | ||
273 | xtrct r1,r10 | ||
274 | xtrct r8,r1 | ||
275 | xtrct r0,r8 | ||
276 | shlr16 r0 | ||
277 | EX( mov.l r10,@(16,r4) ) | ||
278 | EX( mov.l r1,@(20,r4) ) | ||
279 | EX( mov.l r8,@(24,r4) ) | ||
280 | EX( mov.w r0,@(28,r4) ) | ||
281 | bf/s 2b | ||
282 | add #30,r4 | ||
283 | #else | ||
284 | EX( mov.l @(28,r5),r0 ) | ||
285 | EX( mov.l @(24,r5),r8 ) | ||
286 | EX( mov.l @(20,r5),r9 ) | ||
287 | EX( mov.l @(16,r5),r10 ) | ||
288 | EX( mov.w r0,@(30,r4) ) | ||
289 | add #-2,r4 | ||
290 | xtrct r8,r0 | ||
291 | xtrct r9,r8 | ||
292 | xtrct r10,r9 | ||
293 | EX( mov.l r0,@(28,r4) ) | ||
294 | EX( mov.l r8,@(24,r4) ) | ||
295 | EX( mov.l r9,@(20,r4) ) | ||
296 | |||
297 | EX( mov.l @(12,r5),r0 ) | ||
298 | EX( mov.l @(8,r5),r8 ) | ||
299 | xtrct r0,r10 | ||
300 | EX( mov.l @(4,r5),r9 ) | ||
301 | mov.l r10,@(16,r4) | ||
302 | EX( mov.l @r5,r10 ) | ||
303 | xtrct r8,r0 | ||
304 | xtrct r9,r8 | ||
305 | xtrct r10,r9 | ||
306 | EX( mov.l r0,@(12,r4) ) | ||
307 | EX( mov.l r8,@(8,r4) ) | ||
308 | swap.w r10,r0 | ||
309 | EX( mov.l r9,@(4,r4) ) | ||
310 | EX( mov.w r0,@(2,r4) ) | ||
311 | |||
312 | add #32,r5 | ||
313 | bf/s 2b | ||
314 | add #34,r4 | ||
315 | #endif | ||
316 | tst r2,r2 | ||
317 | bt .L_cleanup | ||
318 | |||
319 | 1: ! Read longword, write two words per iteration | ||
320 | EX( mov.l @r5+,r0 ) | ||
321 | dt r2 | ||
322 | #ifdef __LITTLE_ENDIAN__ | ||
323 | EX( mov.w r0,@r4 ) | ||
324 | shlr16 r0 | ||
325 | EX( mov.w r0,@(2,r4) ) | ||
326 | #else | ||
327 | EX( mov.w r0,@(2,r4) ) | ||
328 | shlr16 r0 | ||
329 | EX( mov.w r0,@r4 ) | ||
330 | #endif | ||
331 | bf/s 1b | ||
332 | add #4,r4 | ||
333 | |||
334 | bra .L_cleanup | ||
335 | nop | ||
336 | |||
337 | ! Destination = 01 or 11 | ||
338 | |||
339 | .L_dest01: | ||
340 | .L_dest11: | ||
341 | ! Read longword, write byte, word, byte per iteration | ||
342 | EX( mov.l @r5+,r0 ) | ||
343 | dt r2 | ||
344 | #ifdef __LITTLE_ENDIAN__ | ||
345 | EX( mov.b r0,@r4 ) | ||
346 | shlr8 r0 | ||
347 | add #1,r4 | ||
348 | EX( mov.w r0,@r4 ) | ||
349 | shlr16 r0 | ||
350 | EX( mov.b r0,@(2,r4) ) | ||
351 | bf/s .L_dest01 | ||
352 | add #3,r4 | ||
353 | #else | ||
354 | EX( mov.b r0,@(3,r4) ) | ||
355 | shlr8 r0 | ||
356 | swap.w r0,r7 | ||
357 | EX( mov.b r7,@r4 ) | ||
358 | add #1,r4 | ||
359 | EX( mov.w r0,@r4 ) | ||
360 | bf/s .L_dest01 | ||
361 | add #3,r4 | ||
362 | #endif | ||
363 | |||
364 | ! Cleanup last few bytes | ||
365 | .L_cleanup: | ||
366 | mov r6,r0 | ||
367 | and #3,r0 | ||
368 | tst r0,r0 | ||
369 | bt .L_exit | ||
370 | mov r0,r6 | ||
371 | |||
372 | .L_cleanup_loop: | ||
373 | EX( mov.b @r5+,r0 ) | ||
374 | dt r6 | ||
375 | EX( mov.b r0,@r4 ) | ||
376 | bf/s .L_cleanup_loop | ||
377 | add #1,r4 | ||
378 | |||
379 | .L_exit: | ||
380 | mov #0,r0 ! normal return | ||
381 | 5000: | ||
382 | |||
383 | # Exception handler: | ||
384 | .section .fixup, "ax" | ||
385 | 6000: | ||
386 | mov.l 8000f,r1 | ||
387 | mov r3,r0 | ||
388 | jmp @r1 | ||
389 | sub r4,r0 | ||
390 | .align 2 | ||
391 | 8000: .long 5000b | ||
392 | |||
393 | .previous | ||
394 | mov.l @r15+,r8 | ||
395 | mov.l @r15+,r9 | ||
396 | rts | ||
397 | mov.l @r15+,r10 | ||
diff --git a/arch/sh/mm/extable.c b/arch/sh/mm/extable.c
new file mode 100644
index 000000000000..505ede7c21bf
--- /dev/null
+++ b/arch/sh/mm/extable.c
@@ -0,0 +1,22 @@
1 | /* | ||
2 | * linux/arch/sh/mm/extable.c | ||
3 | * Taken from: | ||
4 | * linux/arch/i386/mm/extable.c | ||
5 | */ | ||
6 | |||
7 | #include <linux/config.h> | ||
8 | #include <linux/module.h> | ||
9 | #include <asm/uaccess.h> | ||
10 | |||
11 | int fixup_exception(struct pt_regs *regs) | ||
12 | { | ||
13 | const struct exception_table_entry *fixup; | ||
14 | |||
15 | fixup = search_exception_tables(regs->pc); | ||
16 | if (fixup) { | ||
17 | regs->pc = fixup->fixup; | ||
18 | return 1; | ||
19 | } | ||
20 | |||
21 | return 0; | ||
22 | } | ||
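For context, the entries that search_exception_tables() walks here are the (insn, fixup) pairs emitted into the __ex_table section by code such as clear_page.S and copy_page.S above. Conceptually (a sketch; the real definition lives in the uaccess headers):

	struct exception_table_entry {
		unsigned long insn;	/* address of the faulting instruction */
		unsigned long fixup;	/* address to continue execution at */
	};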
diff --git a/arch/sh/mm/fault-nommu.c b/arch/sh/mm/fault-nommu.c
new file mode 100644
index 000000000000..34d4e0c68fbb
--- /dev/null
+++ b/arch/sh/mm/fault-nommu.c
@@ -0,0 +1,82 @@
1 | /* | ||
2 | * arch/sh/mm/fault-nommu.c | ||
3 | * | ||
4 | * Copyright (C) 2002 Paul Mundt | ||
5 | * | ||
6 | * Based on linux/arch/sh/mm/fault.c: | ||
7 | * Copyright (C) 1999 Niibe Yutaka | ||
8 | * | ||
9 | * Released under the terms of the GNU GPL v2.0. | ||
10 | */ | ||
11 | |||
12 | #include <linux/signal.h> | ||
13 | #include <linux/sched.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/errno.h> | ||
16 | #include <linux/string.h> | ||
17 | #include <linux/types.h> | ||
18 | #include <linux/ptrace.h> | ||
19 | #include <linux/mman.h> | ||
20 | #include <linux/mm.h> | ||
21 | #include <linux/smp.h> | ||
22 | #include <linux/smp_lock.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | |||
25 | #include <asm/system.h> | ||
26 | #include <asm/io.h> | ||
27 | #include <asm/uaccess.h> | ||
28 | #include <asm/pgalloc.h> | ||
29 | #include <asm/mmu_context.h> | ||
30 | #include <asm/cacheflush.h> | ||
31 | |||
32 | #if defined(CONFIG_SH_KGDB) | ||
33 | #include <asm/kgdb.h> | ||
34 | #endif | ||
35 | |||
36 | extern void die(const char *,struct pt_regs *,long); | ||
37 | |||
38 | /* | ||
39 | * This routine handles page faults. It determines the address, | ||
40 | * and the problem, and then passes it off to one of the appropriate | ||
41 | * routines. | ||
42 | */ | ||
43 | asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess, | ||
44 | unsigned long address) | ||
45 | { | ||
46 | #if defined(CONFIG_SH_KGDB) | ||
47 | if (kgdb_nofault && kgdb_bus_err_hook) | ||
48 | kgdb_bus_err_hook(); | ||
49 | #endif | ||
50 | |||
51 | /* | ||
52 | * Oops. The kernel tried to access some bad page. We'll have to | ||
53 | * terminate things with extreme prejudice. | ||
54 | * | ||
55 | */ | ||
56 | if (address < PAGE_SIZE) { | ||
57 | printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference"); | ||
58 | } else { | ||
59 | printk(KERN_ALERT "Unable to handle kernel paging request"); | ||
60 | } | ||
61 | |||
62 | printk(" at virtual address %08lx\n", address); | ||
63 | printk(KERN_ALERT "pc = %08lx\n", regs->pc); | ||
64 | |||
65 | die("Oops", regs, writeaccess); | ||
66 | do_exit(SIGKILL); | ||
67 | } | ||
68 | |||
69 | asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess, | ||
70 | unsigned long address) | ||
71 | { | ||
72 | #if defined(CONFIG_SH_KGDB) | ||
73 | if (kgdb_nofault && kgdb_bus_err_hook) | ||
74 | kgdb_bus_err_hook(); | ||
75 | #endif | ||
76 | |||
77 | if (address >= TASK_SIZE) | ||
78 | return 1; | ||
79 | |||
80 | return 0; | ||
81 | } | ||
82 | |||
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
new file mode 100644
index 000000000000..7abba2161da6
--- /dev/null
+++ b/arch/sh/mm/fault.c
@@ -0,0 +1,374 @@
1 | /* $Id: fault.c,v 1.14 2004/01/13 05:52:11 kkojima Exp $ | ||
2 | * | ||
3 | * linux/arch/sh/mm/fault.c | ||
4 | * Copyright (C) 1999 Niibe Yutaka | ||
5 | * Copyright (C) 2003 Paul Mundt | ||
6 | * | ||
7 | * Based on linux/arch/i386/mm/fault.c: | ||
8 | * Copyright (C) 1995 Linus Torvalds | ||
9 | */ | ||
10 | |||
11 | #include <linux/signal.h> | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/errno.h> | ||
15 | #include <linux/string.h> | ||
16 | #include <linux/types.h> | ||
17 | #include <linux/ptrace.h> | ||
18 | #include <linux/mman.h> | ||
19 | #include <linux/mm.h> | ||
20 | #include <linux/smp.h> | ||
21 | #include <linux/smp_lock.h> | ||
22 | #include <linux/interrupt.h> | ||
23 | #include <linux/module.h> | ||
24 | |||
25 | #include <asm/system.h> | ||
26 | #include <asm/io.h> | ||
27 | #include <asm/uaccess.h> | ||
28 | #include <asm/pgalloc.h> | ||
29 | #include <asm/mmu_context.h> | ||
30 | #include <asm/cacheflush.h> | ||
31 | #include <asm/kgdb.h> | ||
32 | |||
33 | extern void die(const char *,struct pt_regs *,long); | ||
34 | |||
35 | /* | ||
36 | * This routine handles page faults. It determines the address, | ||
37 | * and the problem, and then passes it off to one of the appropriate | ||
38 | * routines. | ||
39 | */ | ||
40 | asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess, | ||
41 | unsigned long address) | ||
42 | { | ||
43 | struct task_struct *tsk; | ||
44 | struct mm_struct *mm; | ||
45 | struct vm_area_struct * vma; | ||
46 | unsigned long page; | ||
47 | |||
48 | #ifdef CONFIG_SH_KGDB | ||
49 | if (kgdb_nofault && kgdb_bus_err_hook) | ||
50 | kgdb_bus_err_hook(); | ||
51 | #endif | ||
52 | |||
53 | tsk = current; | ||
54 | mm = tsk->mm; | ||
55 | |||
56 | /* | ||
57 | * If we're in an interrupt or have no user | ||
58 | * context, we must not take the fault.. | ||
59 | */ | ||
60 | if (in_atomic() || !mm) | ||
61 | goto no_context; | ||
62 | |||
63 | down_read(&mm->mmap_sem); | ||
64 | |||
65 | vma = find_vma(mm, address); | ||
66 | if (!vma) | ||
67 | goto bad_area; | ||
68 | if (vma->vm_start <= address) | ||
69 | goto good_area; | ||
70 | if (!(vma->vm_flags & VM_GROWSDOWN)) | ||
71 | goto bad_area; | ||
72 | if (expand_stack(vma, address)) | ||
73 | goto bad_area; | ||
74 | /* | ||
75 | * Ok, we have a good vm_area for this memory access, so | ||
76 | * we can handle it.. | ||
77 | */ | ||
78 | good_area: | ||
79 | if (writeaccess) { | ||
80 | if (!(vma->vm_flags & VM_WRITE)) | ||
81 | goto bad_area; | ||
82 | } else { | ||
83 | if (!(vma->vm_flags & (VM_READ | VM_EXEC))) | ||
84 | goto bad_area; | ||
85 | } | ||
86 | |||
87 | /* | ||
88 | * If for any reason at all we couldn't handle the fault, | ||
89 | * make sure we exit gracefully rather than endlessly redo | ||
90 | * the fault. | ||
91 | */ | ||
92 | survive: | ||
93 | switch (handle_mm_fault(mm, vma, address, writeaccess)) { | ||
94 | case VM_FAULT_MINOR: | ||
95 | tsk->min_flt++; | ||
96 | break; | ||
97 | case VM_FAULT_MAJOR: | ||
98 | tsk->maj_flt++; | ||
99 | break; | ||
100 | case VM_FAULT_SIGBUS: | ||
101 | goto do_sigbus; | ||
102 | case VM_FAULT_OOM: | ||
103 | goto out_of_memory; | ||
104 | default: | ||
105 | BUG(); | ||
106 | } | ||
107 | |||
108 | up_read(&mm->mmap_sem); | ||
109 | return; | ||
110 | |||
111 | /* | ||
112 | * Something tried to access memory that isn't in our memory map.. | ||
113 | * Fix it, but check if it's kernel or user first.. | ||
114 | */ | ||
115 | bad_area: | ||
116 | up_read(&mm->mmap_sem); | ||
117 | |||
118 | if (user_mode(regs)) { | ||
119 | tsk->thread.address = address; | ||
120 | tsk->thread.error_code = writeaccess; | ||
121 | force_sig(SIGSEGV, tsk); | ||
122 | return; | ||
123 | } | ||
124 | |||
125 | no_context: | ||
126 | /* Are we prepared to handle this kernel fault? */ | ||
127 | if (fixup_exception(regs)) | ||
128 | return; | ||
129 | |||
130 | /* | ||
131 | * Oops. The kernel tried to access some bad page. We'll have to | ||
132 | * terminate things with extreme prejudice. | ||
133 | * | ||
134 | */ | ||
135 | if (address < PAGE_SIZE) | ||
136 | printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference"); | ||
137 | else | ||
138 | printk(KERN_ALERT "Unable to handle kernel paging request"); | ||
139 | printk(" at virtual address %08lx\n", address); | ||
140 | printk(KERN_ALERT "pc = %08lx\n", regs->pc); | ||
141 | asm volatile("mov.l %1, %0" | ||
142 | : "=r" (page) | ||
143 | : "m" (__m(MMU_TTB))); | ||
144 | if (page) { | ||
145 | page = ((unsigned long *) page)[address >> 22]; | ||
146 | printk(KERN_ALERT "*pde = %08lx\n", page); | ||
147 | if (page & _PAGE_PRESENT) { | ||
148 | page &= PAGE_MASK; | ||
149 | address &= 0x003ff000; | ||
150 | page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT]; | ||
151 | printk(KERN_ALERT "*pte = %08lx\n", page); | ||
152 | } | ||
153 | } | ||
154 | die("Oops", regs, writeaccess); | ||
155 | do_exit(SIGKILL); | ||
156 | |||
157 | /* | ||
158 | * We ran out of memory, or some other thing happened to us that made | ||
159 | * us unable to handle the page fault gracefully. | ||
160 | */ | ||
161 | out_of_memory: | ||
162 | up_read(&mm->mmap_sem); | ||
163 | if (current->pid == 1) { | ||
164 | yield(); | ||
165 | down_read(&mm->mmap_sem); | ||
166 | goto survive; | ||
167 | } | ||
168 | printk("VM: killing process %s\n", tsk->comm); | ||
169 | if (user_mode(regs)) | ||
170 | do_exit(SIGKILL); | ||
171 | goto no_context; | ||
172 | |||
173 | do_sigbus: | ||
174 | up_read(&mm->mmap_sem); | ||
175 | |||
176 | /* | ||
177 | * Send a sigbus, regardless of whether we were in kernel | ||
178 | * or user mode. | ||
179 | */ | ||
180 | tsk->thread.address = address; | ||
181 | tsk->thread.error_code = writeaccess; | ||
182 | tsk->thread.trap_no = 14; | ||
183 | force_sig(SIGBUS, tsk); | ||
184 | |||
185 | /* Kernel mode? Handle exceptions or die */ | ||
186 | if (!user_mode(regs)) | ||
187 | goto no_context; | ||
188 | } | ||
189 | |||
190 | /* | ||
191 | * Called with interrupts disabled. | ||
192 | */ | ||
193 | asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess, | ||
194 | unsigned long address) | ||
195 | { | ||
196 | unsigned long addrmax = P4SEG; | ||
197 | pgd_t *dir; | ||
198 | pmd_t *pmd; | ||
199 | pte_t *pte; | ||
200 | pte_t entry; | ||
201 | |||
202 | #ifdef CONFIG_SH_KGDB | ||
203 | if (kgdb_nofault && kgdb_bus_err_hook) | ||
204 | kgdb_bus_err_hook(); | ||
205 | #endif | ||
206 | |||
207 | #ifdef CONFIG_SH_STORE_QUEUES | ||
208 | addrmax = P4SEG_STORE_QUE + 0x04000000; | ||
209 | #endif | ||
210 | |||
211 | if (address >= P3SEG && address < addrmax) | ||
212 | dir = pgd_offset_k(address); | ||
213 | else if (address >= TASK_SIZE) | ||
214 | return 1; | ||
215 | else if (!current->mm) | ||
216 | return 1; | ||
217 | else | ||
218 | dir = pgd_offset(current->mm, address); | ||
219 | |||
220 | pmd = pmd_offset(dir, address); | ||
221 | if (pmd_none(*pmd)) | ||
222 | return 1; | ||
223 | if (pmd_bad(*pmd)) { | ||
224 | pmd_ERROR(*pmd); | ||
225 | pmd_clear(pmd); | ||
226 | return 1; | ||
227 | } | ||
228 | pte = pte_offset_kernel(pmd, address); | ||
229 | entry = *pte; | ||
230 | if (pte_none(entry) || pte_not_present(entry) | ||
231 | || (writeaccess && !pte_write(entry))) | ||
232 | return 1; | ||
233 | |||
234 | if (writeaccess) | ||
235 | entry = pte_mkdirty(entry); | ||
236 | entry = pte_mkyoung(entry); | ||
237 | |||
238 | #ifdef CONFIG_CPU_SH4 | ||
239 | /* | ||
240 | * The ITLB is not affected by the "ldtlb" instruction, | ||
241 | * so we need to flush the entry ourselves. | ||
242 | */ | ||
243 | |||
244 | { | ||
245 | unsigned long flags; | ||
246 | local_irq_save(flags); | ||
247 | __flush_tlb_page(get_asid(), address&PAGE_MASK); | ||
248 | local_irq_restore(flags); | ||
249 | } | ||
250 | #endif | ||
251 | |||
252 | set_pte(pte, entry); | ||
253 | update_mmu_cache(NULL, address, entry); | ||
254 | |||
255 | return 0; | ||
256 | } | ||
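
For context, __do_page_fault() above is the light-weight TLB-miss path: it returns 0 once it has reloaded the TLB from an already-present PTE, and 1 to punt to the full fault handler. A minimal caller sketch follows -- the real dispatch lives in the assembly exception stubs, and do_page_fault()'s argument triple is assumed from this file:

    /* Sketch only -- not the actual entry code. */
    asmlinkage void tlb_miss_dispatch(struct pt_regs *regs,
                                      unsigned long writeaccess,
                                      unsigned long address)
    {
            /* Fast path: the PTE exists, just reload the TLB. */
            if (__do_page_fault(regs, writeaccess, address) == 0)
                    return;

            /* Slow path: walk the VMAs, possibly allocating a page. */
            do_page_fault(regs, writeaccess, address);
    }
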
257 | |||
258 | void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) | ||
259 | { | ||
260 | if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) { | ||
261 | unsigned long flags; | ||
262 | unsigned long asid; | ||
263 | unsigned long saved_asid = MMU_NO_ASID; | ||
264 | |||
265 | asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK; | ||
266 | page &= PAGE_MASK; | ||
267 | |||
268 | local_irq_save(flags); | ||
269 | if (vma->vm_mm != current->mm) { | ||
270 | saved_asid = get_asid(); | ||
271 | set_asid(asid); | ||
272 | } | ||
273 | __flush_tlb_page(asid, page); | ||
274 | if (saved_asid != MMU_NO_ASID) | ||
275 | set_asid(saved_asid); | ||
276 | local_irq_restore(flags); | ||
277 | } | ||
278 | } | ||
279 | |||
280 | void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | ||
281 | unsigned long end) | ||
282 | { | ||
283 | struct mm_struct *mm = vma->vm_mm; | ||
284 | |||
285 | if (mm->context != NO_CONTEXT) { | ||
286 | unsigned long flags; | ||
287 | int size; | ||
288 | |||
289 | local_irq_save(flags); | ||
290 | size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; | ||
291 | if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB entries to flush */ | ||
292 | mm->context = NO_CONTEXT; | ||
293 | if (mm == current->mm) | ||
294 | activate_context(mm); | ||
295 | } else { | ||
296 | unsigned long asid = mm->context&MMU_CONTEXT_ASID_MASK; | ||
297 | unsigned long saved_asid = MMU_NO_ASID; | ||
298 | |||
299 | start &= PAGE_MASK; | ||
300 | end += (PAGE_SIZE - 1); | ||
301 | end &= PAGE_MASK; | ||
302 | if (mm != current->mm) { | ||
303 | saved_asid = get_asid(); | ||
304 | set_asid(asid); | ||
305 | } | ||
306 | while (start < end) { | ||
307 | __flush_tlb_page(asid, start); | ||
308 | start += PAGE_SIZE; | ||
309 | } | ||
310 | if (saved_asid != MMU_NO_ASID) | ||
311 | set_asid(saved_asid); | ||
312 | } | ||
313 | local_irq_restore(flags); | ||
314 | } | ||
315 | } | ||
316 | |||
317 | void flush_tlb_kernel_range(unsigned long start, unsigned long end) | ||
318 | { | ||
319 | unsigned long flags; | ||
320 | int size; | ||
321 | |||
322 | local_irq_save(flags); | ||
323 | size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; | ||
324 | if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB entries to flush */ | ||
325 | flush_tlb_all(); | ||
326 | } else { | ||
327 | unsigned long asid = init_mm.context&MMU_CONTEXT_ASID_MASK; | ||
328 | unsigned long saved_asid = get_asid(); | ||
329 | |||
330 | start &= PAGE_MASK; | ||
331 | end += (PAGE_SIZE - 1); | ||
332 | end &= PAGE_MASK; | ||
333 | set_asid(asid); | ||
334 | while (start < end) { | ||
335 | __flush_tlb_page(asid, start); | ||
336 | start += PAGE_SIZE; | ||
337 | } | ||
338 | set_asid(saved_asid); | ||
339 | } | ||
340 | local_irq_restore(flags); | ||
341 | } | ||
342 | |||
343 | void flush_tlb_mm(struct mm_struct *mm) | ||
344 | { | ||
345 | /* Invalidate all TLB entries of this process. */ | ||
346 | /* Instead of invalidating each entry, we get a new MMU context. */ | ||
347 | if (mm->context != NO_CONTEXT) { | ||
348 | unsigned long flags; | ||
349 | |||
350 | local_irq_save(flags); | ||
351 | mm->context = NO_CONTEXT; | ||
352 | if (mm == current->mm) | ||
353 | activate_context(mm); | ||
354 | local_irq_restore(flags); | ||
355 | } | ||
356 | } | ||
357 | |||
358 | void flush_tlb_all(void) | ||
359 | { | ||
360 | unsigned long flags, status; | ||
361 | |||
362 | /* | ||
363 | * Flush all the TLB. | ||
364 | * | ||
365 | * Write to the MMU control register's bit: | ||
366 | * TF-bit for SH-3, TI-bit for SH-4. | ||
367 | * It's the same position, bit #2. | ||
368 | */ | ||
369 | local_irq_save(flags); | ||
370 | status = ctrl_inl(MMUCR); | ||
371 | status |= 0x04; | ||
372 | ctrl_outl(status, MMUCR); | ||
373 | local_irq_restore(flags); | ||
374 | } | ||
diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c new file mode 100644 index 000000000000..1f897bab2318 --- /dev/null +++ b/arch/sh/mm/hugetlbpage.c | |||
@@ -0,0 +1,264 @@ | |||
1 | /* | ||
2 | * arch/sh/mm/hugetlbpage.c | ||
3 | * | ||
4 | * SuperH HugeTLB page support. | ||
5 | * | ||
6 | * Cloned from sparc64 by Paul Mundt. | ||
7 | * | ||
8 | * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com) | ||
9 | */ | ||
10 | |||
11 | #include <linux/config.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/fs.h> | ||
14 | #include <linux/mm.h> | ||
15 | #include <linux/hugetlb.h> | ||
16 | #include <linux/pagemap.h> | ||
17 | #include <linux/smp_lock.h> | ||
18 | #include <linux/slab.h> | ||
19 | #include <linux/sysctl.h> | ||
20 | |||
21 | #include <asm/mman.h> | ||
22 | #include <asm/pgalloc.h> | ||
23 | #include <asm/tlb.h> | ||
24 | #include <asm/tlbflush.h> | ||
25 | #include <asm/cacheflush.h> | ||
26 | |||
27 | static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) | ||
28 | { | ||
29 | pgd_t *pgd; | ||
30 | pmd_t *pmd; | ||
31 | pte_t *pte = NULL; | ||
32 | |||
33 | pgd = pgd_offset(mm, addr); | ||
34 | if (pgd) { | ||
35 | pmd = pmd_alloc(mm, pgd, addr); | ||
36 | if (pmd) | ||
37 | pte = pte_alloc_map(mm, pmd, addr); | ||
38 | } | ||
39 | return pte; | ||
40 | } | ||
41 | |||
42 | static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) | ||
43 | { | ||
44 | pgd_t *pgd; | ||
45 | pmd_t *pmd; | ||
46 | pte_t *pte = NULL; | ||
47 | |||
48 | pgd = pgd_offset(mm, addr); | ||
49 | if (pgd) { | ||
50 | pmd = pmd_offset(pgd, addr); | ||
51 | if (pmd) | ||
52 | pte = pte_offset_map(pmd, addr); | ||
53 | } | ||
54 | return pte; | ||
55 | } | ||
56 | |||
57 | #define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZHUGE; } while (0) | ||
58 | |||
59 | static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma, | ||
60 | struct page *page, pte_t * page_table, int write_access) | ||
61 | { | ||
62 | unsigned long i; | ||
63 | pte_t entry; | ||
64 | |||
65 | add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE); | ||
66 | |||
67 | if (write_access) | ||
68 | entry = pte_mkwrite(pte_mkdirty(mk_pte(page, | ||
69 | vma->vm_page_prot))); | ||
70 | else | ||
71 | entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot)); | ||
72 | entry = pte_mkyoung(entry); | ||
73 | mk_pte_huge(entry); | ||
74 | |||
75 | for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) { | ||
76 | set_pte(page_table, entry); | ||
77 | page_table++; | ||
78 | |||
79 | pte_val(entry) += PAGE_SIZE; | ||
80 | } | ||
81 | } | ||
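
Since this port has no pmd-level huge entries, set_huge_pte() above represents one huge page as a run of consecutive base-page PTEs, each tagged _PAGE_SZHUGE and stepped by PAGE_SIZE. A stand-alone illustration of the arithmetic -- the 64 kB huge page over 4 kB base pages is an assumption; the real constants depend on the configured huge page size:

    #include <stdio.h>

    /* Assumed values: HPAGE_SHIFT 16 over PAGE_SHIFT 12. */
    #define PAGE_SHIFT         12
    #define HPAGE_SHIFT        16
    #define PAGE_SIZE          (1UL << PAGE_SHIFT)
    #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)

    int main(void)
    {
            unsigned long phys = 0x0c100000UL;  /* hypothetical huge page */
            int i;

            /* One huge page becomes 1 << 4 == 16 consecutive base PTEs,
             * each stepping the physical address by PAGE_SIZE. */
            for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++)
                    printf("pte[%2d] -> %#lx\n", i, phys + i * PAGE_SIZE);

            return 0;
    }
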
82 | |||
83 | /* | ||
84 | * This function checks for proper alignment of input addr and len parameters. | ||
85 | */ | ||
86 | int is_aligned_hugepage_range(unsigned long addr, unsigned long len) | ||
87 | { | ||
88 | if (len & ~HPAGE_MASK) | ||
89 | return -EINVAL; | ||
90 | if (addr & ~HPAGE_MASK) | ||
91 | return -EINVAL; | ||
92 | return 0; | ||
93 | } | ||
94 | |||
95 | int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, | ||
96 | struct vm_area_struct *vma) | ||
97 | { | ||
98 | pte_t *src_pte, *dst_pte, entry; | ||
99 | struct page *ptepage; | ||
100 | unsigned long addr = vma->vm_start; | ||
101 | unsigned long end = vma->vm_end; | ||
102 | int i; | ||
103 | |||
104 | while (addr < end) { | ||
105 | dst_pte = huge_pte_alloc(dst, addr); | ||
106 | if (!dst_pte) | ||
107 | goto nomem; | ||
108 | src_pte = huge_pte_offset(src, addr); | ||
109 | BUG_ON(!src_pte || pte_none(*src_pte)); | ||
110 | entry = *src_pte; | ||
111 | ptepage = pte_page(entry); | ||
112 | get_page(ptepage); | ||
113 | for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) { | ||
114 | set_pte(dst_pte, entry); | ||
115 | pte_val(entry) += PAGE_SIZE; | ||
116 | dst_pte++; | ||
117 | } | ||
118 | add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE); | ||
119 | addr += HPAGE_SIZE; | ||
120 | } | ||
121 | return 0; | ||
122 | |||
123 | nomem: | ||
124 | return -ENOMEM; | ||
125 | } | ||
126 | |||
127 | int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, | ||
128 | struct page **pages, struct vm_area_struct **vmas, | ||
129 | unsigned long *position, int *length, int i) | ||
130 | { | ||
131 | unsigned long vaddr = *position; | ||
132 | int remainder = *length; | ||
133 | |||
134 | WARN_ON(!is_vm_hugetlb_page(vma)); | ||
135 | |||
136 | while (vaddr < vma->vm_end && remainder) { | ||
137 | if (pages) { | ||
138 | pte_t *pte; | ||
139 | struct page *page; | ||
140 | |||
141 | pte = huge_pte_offset(mm, vaddr); | ||
142 | |||
143 | /* hugetlb should be locked, and hence, prefaulted */ | ||
144 | BUG_ON(!pte || pte_none(*pte)); | ||
145 | |||
146 | page = pte_page(*pte); | ||
147 | |||
148 | WARN_ON(!PageCompound(page)); | ||
149 | |||
150 | get_page(page); | ||
151 | pages[i] = page; | ||
152 | } | ||
153 | |||
154 | if (vmas) | ||
155 | vmas[i] = vma; | ||
156 | |||
157 | vaddr += PAGE_SIZE; | ||
158 | --remainder; | ||
159 | ++i; | ||
160 | } | ||
161 | |||
162 | *length = remainder; | ||
163 | *position = vaddr; | ||
164 | |||
165 | return i; | ||
166 | } | ||
167 | |||
168 | struct page *follow_huge_addr(struct mm_struct *mm, | ||
169 | unsigned long address, int write) | ||
170 | { | ||
171 | return ERR_PTR(-EINVAL); | ||
172 | } | ||
173 | |||
174 | int pmd_huge(pmd_t pmd) | ||
175 | { | ||
176 | return 0; | ||
177 | } | ||
178 | |||
179 | struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, | ||
180 | pmd_t *pmd, int write) | ||
181 | { | ||
182 | return NULL; | ||
183 | } | ||
184 | |||
185 | void unmap_hugepage_range(struct vm_area_struct *vma, | ||
186 | unsigned long start, unsigned long end) | ||
187 | { | ||
188 | struct mm_struct *mm = vma->vm_mm; | ||
189 | unsigned long address; | ||
190 | pte_t *pte; | ||
191 | struct page *page; | ||
192 | int i; | ||
193 | |||
194 | BUG_ON(start & (HPAGE_SIZE - 1)); | ||
195 | BUG_ON(end & (HPAGE_SIZE - 1)); | ||
196 | |||
197 | for (address = start; address < end; address += HPAGE_SIZE) { | ||
198 | pte = huge_pte_offset(mm, address); | ||
199 | BUG_ON(!pte); | ||
200 | if (pte_none(*pte)) | ||
201 | continue; | ||
202 | page = pte_page(*pte); | ||
203 | put_page(page); | ||
204 | for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) { | ||
205 | pte_clear(mm, address+(i*PAGE_SIZE), pte); | ||
206 | pte++; | ||
207 | } | ||
208 | } | ||
209 | add_mm_counter(mm, rss, -((end - start) >> PAGE_SHIFT)); | ||
210 | flush_tlb_range(vma, start, end); | ||
211 | } | ||
212 | |||
213 | int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma) | ||
214 | { | ||
215 | struct mm_struct *mm = current->mm; | ||
216 | unsigned long addr; | ||
217 | int ret = 0; | ||
218 | |||
219 | BUG_ON(vma->vm_start & ~HPAGE_MASK); | ||
220 | BUG_ON(vma->vm_end & ~HPAGE_MASK); | ||
221 | |||
222 | spin_lock(&mm->page_table_lock); | ||
223 | for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) { | ||
224 | unsigned long idx; | ||
225 | pte_t *pte = huge_pte_alloc(mm, addr); | ||
226 | struct page *page; | ||
227 | |||
228 | if (!pte) { | ||
229 | ret = -ENOMEM; | ||
230 | goto out; | ||
231 | } | ||
232 | if (!pte_none(*pte)) | ||
233 | continue; | ||
234 | |||
235 | idx = ((addr - vma->vm_start) >> HPAGE_SHIFT) | ||
236 | + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT)); | ||
237 | page = find_get_page(mapping, idx); | ||
238 | if (!page) { | ||
239 | /* charge the fs quota first */ | ||
240 | if (hugetlb_get_quota(mapping)) { | ||
241 | ret = -ENOMEM; | ||
242 | goto out; | ||
243 | } | ||
244 | page = alloc_huge_page(); | ||
245 | if (!page) { | ||
246 | hugetlb_put_quota(mapping); | ||
247 | ret = -ENOMEM; | ||
248 | goto out; | ||
249 | } | ||
250 | ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC); | ||
251 | if (!ret) { | ||
252 | unlock_page(page); | ||
253 | } else { | ||
254 | hugetlb_put_quota(mapping); | ||
255 | free_huge_page(page); | ||
256 | goto out; | ||
257 | } | ||
258 | } | ||
259 | set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE); | ||
260 | } | ||
261 | out: | ||
262 | spin_unlock(&mm->page_table_lock); | ||
263 | return ret; | ||
264 | } | ||
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c new file mode 100644 index 000000000000..4e9c854845a4 --- /dev/null +++ b/arch/sh/mm/init.c | |||
@@ -0,0 +1,313 @@ | |||
1 | /* $Id: init.c,v 1.19 2004/02/21 04:42:16 kkojima Exp $ | ||
2 | * | ||
3 | * linux/arch/sh/mm/init.c | ||
4 | * | ||
5 | * Copyright (C) 1999 Niibe Yutaka | ||
6 | * Copyright (C) 2002, 2004 Paul Mundt | ||
7 | * | ||
8 | * Based on linux/arch/i386/mm/init.c: | ||
9 | * Copyright (C) 1995 Linus Torvalds | ||
10 | */ | ||
11 | |||
12 | #include <linux/config.h> | ||
13 | #include <linux/signal.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/errno.h> | ||
17 | #include <linux/string.h> | ||
18 | #include <linux/types.h> | ||
19 | #include <linux/ptrace.h> | ||
20 | #include <linux/mman.h> | ||
21 | #include <linux/mm.h> | ||
22 | #include <linux/swap.h> | ||
23 | #include <linux/smp.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <linux/highmem.h> | ||
26 | #include <linux/bootmem.h> | ||
27 | #include <linux/pagemap.h> | ||
28 | |||
29 | #include <asm/processor.h> | ||
30 | #include <asm/system.h> | ||
31 | #include <asm/uaccess.h> | ||
32 | #include <asm/pgtable.h> | ||
33 | #include <asm/pgalloc.h> | ||
34 | #include <asm/mmu_context.h> | ||
35 | #include <asm/io.h> | ||
36 | #include <asm/tlb.h> | ||
37 | #include <asm/cacheflush.h> | ||
38 | #include <asm/cache.h> | ||
39 | |||
40 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); | ||
41 | pgd_t swapper_pg_dir[PTRS_PER_PGD]; | ||
42 | |||
43 | /* | ||
44 | * Cache of MMU context last used. | ||
45 | */ | ||
46 | unsigned long mmu_context_cache = NO_CONTEXT; | ||
47 | |||
48 | #ifdef CONFIG_MMU | ||
49 | /* It'd be good if these lines were in the standard header file. */ | ||
50 | #define START_PFN (NODE_DATA(0)->bdata->node_boot_start >> PAGE_SHIFT) | ||
51 | #define MAX_LOW_PFN (NODE_DATA(0)->bdata->node_low_pfn) | ||
52 | #endif | ||
53 | |||
54 | #ifdef CONFIG_DISCONTIGMEM | ||
55 | pg_data_t discontig_page_data[MAX_NUMNODES]; | ||
56 | bootmem_data_t discontig_node_bdata[MAX_NUMNODES]; | ||
57 | #endif | ||
58 | |||
59 | void (*copy_page)(void *from, void *to); | ||
60 | void (*clear_page)(void *to); | ||
61 | |||
62 | void show_mem(void) | ||
63 | { | ||
64 | int i, total = 0, reserved = 0; | ||
65 | int shared = 0, cached = 0; | ||
66 | |||
67 | printk("Mem-info:\n"); | ||
68 | show_free_areas(); | ||
69 | printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); | ||
70 | i = max_mapnr; | ||
71 | while (i-- > 0) { | ||
72 | total++; | ||
73 | if (PageReserved(mem_map+i)) | ||
74 | reserved++; | ||
75 | else if (PageSwapCache(mem_map+i)) | ||
76 | cached++; | ||
77 | else if (page_count(mem_map+i)) | ||
78 | shared += page_count(mem_map+i) - 1; | ||
79 | } | ||
80 | printk("%d pages of RAM\n",total); | ||
81 | printk("%d reserved pages\n",reserved); | ||
82 | printk("%d pages shared\n",shared); | ||
83 | printk("%d pages swap cached\n",cached); | ||
84 | } | ||
85 | |||
86 | static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot) | ||
87 | { | ||
88 | pgd_t *pgd; | ||
89 | pmd_t *pmd; | ||
90 | pte_t *pte; | ||
91 | |||
92 | pgd = swapper_pg_dir + pgd_index(addr); | ||
93 | if (pgd_none(*pgd)) { | ||
94 | pgd_ERROR(*pgd); | ||
95 | return; | ||
96 | } | ||
97 | |||
98 | pmd = pmd_offset(pgd, addr); | ||
99 | if (pmd_none(*pmd)) { | ||
100 | pte = (pte_t *)get_zeroed_page(GFP_ATOMIC); | ||
101 | set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER)); | ||
102 | if (pte != pte_offset_kernel(pmd, 0)) { | ||
103 | pmd_ERROR(*pmd); | ||
104 | return; | ||
105 | } | ||
106 | } | ||
107 | |||
108 | pte = pte_offset_kernel(pmd, addr); | ||
109 | if (!pte_none(*pte)) { | ||
110 | pte_ERROR(*pte); | ||
111 | return; | ||
112 | } | ||
113 | |||
114 | set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot)); | ||
115 | |||
116 | __flush_tlb_page(get_asid(), addr); | ||
117 | } | ||
118 | |||
119 | /* | ||
120 | * As a performance optimization, other platforms preserve the fixmap mapping | ||
121 | * across a context switch. We don't presently do this, but it could be done | ||
122 | * in a similar fashion to the wired TLB interface that sh64 uses (by way | ||
123 | * of the memory-mapped UTLB configuration) -- this unfortunately forces us to | ||
124 | * give up a TLB entry for each mapping we want to preserve. While this may be | ||
125 | * viable for a small number of fixmaps, it's not particularly useful for | ||
126 | * everything and needs to be carefully evaluated. (i.e., we may want this for | ||
127 | * the vsyscall page). | ||
128 | * | ||
129 | * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we can pass | ||
130 | * in at __set_fixmap() time to determine the appropriate behavior to follow. | ||
131 | * | ||
132 | * -- PFM. | ||
133 | */ | ||
134 | void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot) | ||
135 | { | ||
136 | unsigned long address = __fix_to_virt(idx); | ||
137 | |||
138 | if (idx >= __end_of_fixed_addresses) { | ||
139 | BUG(); | ||
140 | return; | ||
141 | } | ||
142 | |||
143 | set_pte_phys(address, phys, prot); | ||
144 | } | ||
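
A hedged usage sketch of the fixmap interface above -- FIX_EXAMPLE, the physical address, and the use of PAGE_KERNEL are all placeholders, not things this port necessarily defines:

    /* Map a device page at a compile-time-fixed virtual address.
     * All values here are hypothetical. */
    __set_fixmap(FIX_EXAMPLE, 0xfe240000UL, PAGE_KERNEL);

    /* The mapping is then reachable at __fix_to_virt(FIX_EXAMPLE). */
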
145 | |||
146 | /* References to section boundaries */ | ||
147 | |||
148 | extern char _text, _etext, _edata, __bss_start, _end; | ||
149 | extern char __init_begin, __init_end; | ||
150 | |||
151 | /* | ||
152 | * paging_init() sets up the page tables | ||
153 | * | ||
154 | * This routine also unmaps the page at kernel virtual address 0, so | ||
155 | * that we can trap those pesky NULL-reference errors in the kernel. | ||
156 | */ | ||
157 | void __init paging_init(void) | ||
158 | { | ||
159 | unsigned long zones_size[MAX_NR_ZONES] = { 0, }; | ||
160 | |||
161 | /* | ||
162 | * Setup some defaults for the zone sizes.. these should be safe | ||
163 | * regardless of discontiguous memory or MMU settings. | ||
164 | */ | ||
165 | zones_size[ZONE_DMA] = 0 >> PAGE_SHIFT; | ||
166 | zones_size[ZONE_NORMAL] = __MEMORY_SIZE >> PAGE_SHIFT; | ||
167 | #ifdef CONFIG_HIGHMEM | ||
168 | zones_size[ZONE_HIGHMEM] = 0 >> PAGE_SHIFT; | ||
169 | #endif | ||
170 | |||
171 | #ifdef CONFIG_MMU | ||
172 | /* | ||
173 | * If we have an MMU, and want to be using it .. we need to adjust | ||
174 | * the zone sizes accordingly, in addition to turning it on. | ||
175 | */ | ||
176 | { | ||
177 | unsigned long max_dma, low, start_pfn; | ||
178 | pgd_t *pg_dir; | ||
179 | int i; | ||
180 | |||
181 | /* We don't need a kernel mapping, as the hardware supports that. */ | ||
182 | pg_dir = swapper_pg_dir; | ||
183 | |||
184 | for (i = 0; i < PTRS_PER_PGD; i++) | ||
185 | pgd_val(pg_dir[i]) = 0; | ||
186 | |||
187 | /* Turn on the MMU */ | ||
188 | enable_mmu(); | ||
189 | |||
190 | /* Fixup the zone sizes */ | ||
191 | start_pfn = START_PFN; | ||
192 | max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; | ||
193 | low = MAX_LOW_PFN; | ||
194 | |||
195 | if (low < max_dma) { | ||
196 | zones_size[ZONE_DMA] = low - start_pfn; | ||
197 | zones_size[ZONE_NORMAL] = 0; | ||
198 | } else { | ||
199 | zones_size[ZONE_DMA] = max_dma - start_pfn; | ||
200 | zones_size[ZONE_NORMAL] = low - max_dma; | ||
201 | } | ||
202 | } | ||
203 | |||
204 | #elif defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4) | ||
205 | /* | ||
206 | * If we don't have CONFIG_MMU set and the processor in question | ||
207 | * still has an MMU, care needs to be taken to make sure it doesn't | ||
208 | * stay on.. Since the boot loader could have potentially already | ||
209 | * turned it on, and we clearly don't want it, we simply turn it off. | ||
210 | * | ||
211 | * We don't need to do anything special for the zone sizes, since the | ||
212 | * default values that were already configured up above should be | ||
213 | * satisfactory. | ||
214 | */ | ||
215 | disable_mmu(); | ||
216 | #endif | ||
217 | NODE_DATA(0)->node_mem_map = NULL; | ||
218 | free_area_init_node(0, NODE_DATA(0), zones_size, __MEMORY_START >> PAGE_SHIFT, 0); | ||
219 | |||
220 | #ifdef CONFIG_DISCONTIGMEM | ||
221 | /* | ||
222 | * And for discontig, do some more fixups on the zone sizes.. | ||
223 | */ | ||
224 | zones_size[ZONE_DMA] = __MEMORY_SIZE_2ND >> PAGE_SHIFT; | ||
225 | zones_size[ZONE_NORMAL] = 0; | ||
226 | free_area_init_node(1, NODE_DATA(1), zones_size, __MEMORY_START_2ND >> PAGE_SHIFT, 0); | ||
227 | #endif | ||
228 | } | ||
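
To make the MMU-case zone split above concrete, here is a stand-alone rehearsal of the arithmetic with made-up PFNs (all three values are hypothetical):

    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical: memory spans PFNs 0x0c000..0x10000, with
             * MAX_DMA_ADDRESS falling at PFN 0x0e000. */
            unsigned long start_pfn = 0x0c000, low = 0x10000, max_dma = 0x0e000;
            unsigned long zone_dma, zone_normal;

            if (low < max_dma) {
                    zone_dma = low - start_pfn;
                    zone_normal = 0;
            } else {
                    zone_dma = max_dma - start_pfn;
                    zone_normal = low - max_dma;
            }

            printf("ZONE_DMA: %lu pages, ZONE_NORMAL: %lu pages\n",
                   zone_dma, zone_normal);
            return 0;
    }
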
229 | |||
230 | void __init mem_init(void) | ||
231 | { | ||
232 | extern unsigned long empty_zero_page[1024]; | ||
233 | int codesize, reservedpages, datasize, initsize; | ||
234 | int tmp; | ||
235 | extern unsigned long memory_start; | ||
236 | |||
237 | #ifdef CONFIG_MMU | ||
238 | high_memory = (void *)__va(MAX_LOW_PFN * PAGE_SIZE); | ||
239 | #else | ||
240 | extern unsigned long memory_end; | ||
241 | |||
242 | high_memory = (void *)(memory_end & PAGE_MASK); | ||
243 | #endif | ||
244 | |||
245 | max_mapnr = num_physpages = MAP_NR(high_memory) - MAP_NR(memory_start); | ||
246 | |||
247 | /* clear the zero-page */ | ||
248 | memset(empty_zero_page, 0, PAGE_SIZE); | ||
249 | __flush_wback_region(empty_zero_page, PAGE_SIZE); | ||
250 | |||
251 | /* | ||
252 | * Setup wrappers for copy/clear_page(), these will get overridden | ||
253 | * later in the boot process if a better method is available. | ||
254 | */ | ||
255 | copy_page = copy_page_slow; | ||
256 | clear_page = clear_page_slow; | ||
257 | |||
258 | /* this will put all low memory onto the freelists */ | ||
259 | totalram_pages += free_all_bootmem_node(NODE_DATA(0)); | ||
260 | #ifdef CONFIG_DISCONTIGMEM | ||
261 | totalram_pages += free_all_bootmem_node(NODE_DATA(1)); | ||
262 | #endif | ||
263 | reservedpages = 0; | ||
264 | for (tmp = 0; tmp < num_physpages; tmp++) | ||
265 | /* | ||
266 | * Only count reserved RAM pages | ||
267 | */ | ||
268 | if (PageReserved(mem_map+tmp)) | ||
269 | reservedpages++; | ||
270 | |||
271 | codesize = (unsigned long) &_etext - (unsigned long) &_text; | ||
272 | datasize = (unsigned long) &_edata - (unsigned long) &_etext; | ||
273 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; | ||
274 | |||
275 | printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n", | ||
276 | (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), | ||
277 | max_mapnr << (PAGE_SHIFT-10), | ||
278 | codesize >> 10, | ||
279 | reservedpages << (PAGE_SHIFT-10), | ||
280 | datasize >> 10, | ||
281 | initsize >> 10); | ||
282 | |||
283 | p3_cache_init(); | ||
284 | } | ||
285 | |||
286 | void free_initmem(void) | ||
287 | { | ||
288 | unsigned long addr; | ||
289 | |||
290 | addr = (unsigned long)(&__init_begin); | ||
291 | for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { | ||
292 | ClearPageReserved(virt_to_page(addr)); | ||
293 | set_page_count(virt_to_page(addr), 1); | ||
294 | free_page(addr); | ||
295 | totalram_pages++; | ||
296 | } | ||
297 | printk("Freeing unused kernel memory: %dk freed\n", (&__init_end - &__init_begin) >> 10); | ||
298 | } | ||
299 | |||
300 | #ifdef CONFIG_BLK_DEV_INITRD | ||
301 | void free_initrd_mem(unsigned long start, unsigned long end) | ||
302 | { | ||
303 | unsigned long p; | ||
304 | for (p = start; p < end; p += PAGE_SIZE) { | ||
305 | ClearPageReserved(virt_to_page(p)); | ||
306 | set_page_count(virt_to_page(p), 1); | ||
307 | free_page(p); | ||
308 | totalram_pages++; | ||
309 | } | ||
310 | printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); | ||
311 | } | ||
312 | #endif | ||
313 | |||
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c new file mode 100644 index 000000000000..9f490c2742f0 --- /dev/null +++ b/arch/sh/mm/ioremap.c | |||
@@ -0,0 +1,163 @@ | |||
1 | /* | ||
2 | * arch/sh/mm/ioremap.c | ||
3 | * | ||
4 | * Re-map IO memory to kernel address space so that we can access it. | ||
5 | * This is needed for high PCI addresses that aren't mapped in the | ||
6 | * 640k-1MB IO memory area on PC's | ||
7 | * | ||
8 | * (C) Copyright 1995 1996 Linus Torvalds | ||
9 | */ | ||
10 | |||
11 | #include <linux/vmalloc.h> | ||
12 | #include <linux/mm.h> | ||
13 | #include <asm/io.h> | ||
14 | #include <asm/page.h> | ||
15 | #include <asm/pgalloc.h> | ||
16 | #include <asm/cacheflush.h> | ||
17 | #include <asm/tlbflush.h> | ||
18 | |||
19 | static inline void remap_area_pte(pte_t * pte, unsigned long address, | ||
20 | unsigned long size, unsigned long phys_addr, unsigned long flags) | ||
21 | { | ||
22 | unsigned long end; | ||
23 | unsigned long pfn; | ||
24 | pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | | ||
25 | _PAGE_DIRTY | _PAGE_ACCESSED | | ||
26 | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | flags); | ||
27 | |||
28 | address &= ~PMD_MASK; | ||
29 | end = address + size; | ||
30 | if (end > PMD_SIZE) | ||
31 | end = PMD_SIZE; | ||
32 | if (address >= end) | ||
33 | BUG(); | ||
34 | pfn = phys_addr >> PAGE_SHIFT; | ||
35 | do { | ||
36 | if (!pte_none(*pte)) { | ||
37 | printk("remap_area_pte: page already exists\n"); | ||
38 | BUG(); | ||
39 | } | ||
40 | set_pte(pte, pfn_pte(pfn, pgprot)); | ||
41 | address += PAGE_SIZE; | ||
42 | pfn++; | ||
43 | pte++; | ||
44 | } while (address && (address < end)); | ||
45 | } | ||
46 | |||
47 | static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, | ||
48 | unsigned long size, unsigned long phys_addr, unsigned long flags) | ||
49 | { | ||
50 | unsigned long end; | ||
51 | |||
52 | address &= ~PGDIR_MASK; | ||
53 | end = address + size; | ||
54 | if (end > PGDIR_SIZE) | ||
55 | end = PGDIR_SIZE; | ||
56 | phys_addr -= address; | ||
57 | if (address >= end) | ||
58 | BUG(); | ||
59 | do { | ||
60 | pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address); | ||
61 | if (!pte) | ||
62 | return -ENOMEM; | ||
63 | remap_area_pte(pte, address, end - address, address + phys_addr, flags); | ||
64 | address = (address + PMD_SIZE) & PMD_MASK; | ||
65 | pmd++; | ||
66 | } while (address && (address < end)); | ||
67 | return 0; | ||
68 | } | ||
69 | |||
70 | int remap_area_pages(unsigned long address, unsigned long phys_addr, | ||
71 | unsigned long size, unsigned long flags) | ||
72 | { | ||
73 | int error; | ||
74 | pgd_t * dir; | ||
75 | unsigned long end = address + size; | ||
76 | |||
77 | phys_addr -= address; | ||
78 | dir = pgd_offset_k(address); | ||
79 | flush_cache_all(); | ||
80 | if (address >= end) | ||
81 | BUG(); | ||
82 | spin_lock(&init_mm.page_table_lock); | ||
83 | do { | ||
84 | pmd_t *pmd; | ||
85 | pmd = pmd_alloc(&init_mm, dir, address); | ||
86 | error = -ENOMEM; | ||
87 | if (!pmd) | ||
88 | break; | ||
89 | if (remap_area_pmd(pmd, address, end - address, | ||
90 | phys_addr + address, flags)) | ||
91 | break; | ||
92 | error = 0; | ||
93 | address = (address + PGDIR_SIZE) & PGDIR_MASK; | ||
94 | dir++; | ||
95 | } while (address && (address < end)); | ||
96 | spin_unlock(&init_mm.page_table_lock); | ||
97 | flush_tlb_all(); | ||
98 | return error; | ||
99 | } | ||
100 | |||
101 | /* | ||
102 | * Generic mapping function (not visible outside): | ||
103 | */ | ||
104 | |||
105 | /* | ||
106 | * Remap an arbitrary physical address space into the kernel virtual | ||
107 | * address space. Needed when the kernel wants to access high addresses | ||
108 | * directly. | ||
109 | * | ||
110 | * NOTE! We need to allow non-page-aligned mappings too: we will obviously | ||
111 | * have to convert them into an offset in a page-aligned mapping, but the | ||
112 | * caller shouldn't need to know that small detail. | ||
113 | */ | ||
114 | void * p3_ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags) | ||
115 | { | ||
116 | void * addr; | ||
117 | struct vm_struct * area; | ||
118 | unsigned long offset, last_addr; | ||
119 | |||
120 | /* Don't allow wraparound or zero size */ | ||
121 | last_addr = phys_addr + size - 1; | ||
122 | if (!size || last_addr < phys_addr) | ||
123 | return NULL; | ||
124 | |||
125 | /* | ||
126 | * Don't remap the low PCI/ISA area, it's always mapped.. | ||
127 | */ | ||
128 | if (phys_addr >= 0xA0000 && last_addr < 0x100000) | ||
129 | return phys_to_virt(phys_addr); | ||
130 | |||
131 | /* | ||
132 | * Don't allow anybody to remap normal RAM that we're using.. | ||
133 | */ | ||
134 | if (phys_addr < virt_to_phys(high_memory)) | ||
135 | return NULL; | ||
136 | |||
137 | /* | ||
138 | * Mappings have to be page-aligned | ||
139 | */ | ||
140 | offset = phys_addr & ~PAGE_MASK; | ||
141 | phys_addr &= PAGE_MASK; | ||
142 | size = PAGE_ALIGN(last_addr+1) - phys_addr; | ||
143 | |||
144 | /* | ||
145 | * Ok, go for it.. | ||
146 | */ | ||
147 | area = get_vm_area(size, VM_IOREMAP); | ||
148 | if (!area) | ||
149 | return NULL; | ||
150 | area->phys_addr = phys_addr; | ||
151 | addr = area->addr; | ||
152 | if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) { | ||
153 | vunmap(addr); | ||
154 | return NULL; | ||
155 | } | ||
156 | return (void *) (offset + (char *)addr); | ||
157 | } | ||
158 | |||
159 | void p3_iounmap(void *addr) | ||
160 | { | ||
161 | if (addr > high_memory) | ||
162 | vfree((void *)(PAGE_MASK & (unsigned long)addr)); | ||
163 | } | ||
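
A minimal usage sketch for p3_ioremap()/p3_iounmap() -- the physical base, size, and register offset are invented, and a flags value of 0 simply adds no extra PTE bits:

    /* Hypothetical driver fragment; error handling elided. */
    void *regs = p3_ioremap(0xfe800000UL, PAGE_SIZE, 0);

    if (regs) {
            unsigned long status = ctrl_inl((unsigned long)regs + 0x04);
            /* ... talk to the device ... */
            p3_iounmap(regs);
    }
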
diff --git a/arch/sh/mm/pg-dma.c b/arch/sh/mm/pg-dma.c new file mode 100644 index 000000000000..1406d2e348ca --- /dev/null +++ b/arch/sh/mm/pg-dma.c | |||
@@ -0,0 +1,97 @@ | |||
1 | /* | ||
2 | * arch/sh/mm/pg-dma.c | ||
3 | * | ||
4 | * Fast clear_page()/copy_page() implementation using the SH DMAC | ||
5 | * | ||
6 | * Copyright (C) 2003 Paul Mundt | ||
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <asm/semaphore.h> | ||
16 | #include <asm/mmu_context.h> | ||
17 | #include <asm/addrspace.h> | ||
18 | #include <asm/atomic.h> | ||
19 | #include <asm/page.h> | ||
20 | #include <asm/dma.h> | ||
21 | #include <asm/io.h> | ||
22 | |||
23 | /* Channel to use for page ops, must be dual-address mode capable. */ | ||
24 | static int dma_channel = CONFIG_DMA_PAGE_OPS_CHANNEL; | ||
25 | |||
26 | static void copy_page_dma(void *to, void *from) | ||
27 | { | ||
28 | /* | ||
29 | * This doesn't seem to get triggered until further along in the | ||
30 | * boot process, at which point the DMAC is already initialized. | ||
31 | * Fix this in the same fashion as clear_page_dma() in the event | ||
32 | * that this crashes due to the DMAC not being initialized. | ||
33 | */ | ||
34 | |||
35 | flush_icache_range((unsigned long)from, (unsigned long)from + PAGE_SIZE); | ||
36 | dma_write_page(dma_channel, (unsigned long)from, (unsigned long)to); | ||
37 | dma_wait_for_completion(dma_channel); | ||
38 | } | ||
39 | |||
40 | static void clear_page_dma(void *to) | ||
41 | { | ||
42 | extern unsigned long empty_zero_page[1024]; | ||
43 | |||
44 | /* | ||
45 | * We get invoked quite early on, if the DMAC hasn't been initialized | ||
46 | * yet, fall back on the slow manual implementation. | ||
47 | */ | ||
48 | if (dma_info[dma_channel].chan != dma_channel) { | ||
49 | clear_page_slow(to); | ||
50 | return; | ||
51 | } | ||
52 | |||
53 | dma_write_page(dma_channel, (unsigned long)empty_zero_page, | ||
54 | (unsigned long)to); | ||
55 | |||
56 | /* | ||
57 | * FIXME: Something is a bit racy here: if we poll the counter right | ||
58 | * away, we seem to lock up. Flushing the page from the dcache doesn't | ||
59 | * seem to make a difference one way or the other, though either a full | ||
60 | * icache or dcache flush does. | ||
61 | * | ||
62 | * The location of this is important as well, and must happen prior to | ||
63 | * the completion loop but after the transfer has been initiated. | ||
64 | * | ||
65 | * Oddly enough, this doesn't appear to be an issue for copy_page().. | ||
66 | */ | ||
67 | flush_icache_range((unsigned long)to, (unsigned long)to + PAGE_SIZE); | ||
68 | |||
69 | dma_wait_for_completion(dma_channel); | ||
70 | } | ||
71 | |||
72 | static int __init pg_dma_init(void) | ||
73 | { | ||
74 | int ret; | ||
75 | |||
76 | ret = request_dma(dma_channel, "page ops"); | ||
77 | if (ret != 0) | ||
78 | return ret; | ||
79 | |||
80 | copy_page = copy_page_dma; | ||
81 | clear_page = clear_page_dma; | ||
82 | |||
83 | return ret; | ||
84 | } | ||
85 | |||
86 | static void __exit pg_dma_exit(void) | ||
87 | { | ||
88 | free_dma(dma_channel); | ||
89 | } | ||
90 | |||
91 | module_init(pg_dma_init); | ||
92 | module_exit(pg_dma_exit); | ||
93 | |||
94 | MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>"); | ||
95 | MODULE_DESCRIPTION("Optimized page copy/clear routines using a dual-address mode capable DMAC channel"); | ||
96 | MODULE_LICENSE("GPL"); | ||
97 | |||
diff --git a/arch/sh/mm/pg-nommu.c b/arch/sh/mm/pg-nommu.c new file mode 100644 index 000000000000..8f9165a4e333 --- /dev/null +++ b/arch/sh/mm/pg-nommu.c | |||
@@ -0,0 +1,36 @@ | |||
1 | /* | ||
2 | * arch/sh/mm/pg-nommu.c | ||
3 | * | ||
4 | * clear_page()/copy_page() implementation for MMUless SH. | ||
5 | * | ||
6 | * Copyright (C) 2003 Paul Mundt | ||
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/string.h> | ||
15 | #include <asm/page.h> | ||
16 | |||
17 | static void copy_page_nommu(void *to, void *from) | ||
18 | { | ||
19 | memcpy(to, from, PAGE_SIZE); | ||
20 | } | ||
21 | |||
22 | static void clear_page_nommu(void *to) | ||
23 | { | ||
24 | memset(to, 0, PAGE_SIZE); | ||
25 | } | ||
26 | |||
27 | static int __init pg_nommu_init(void) | ||
28 | { | ||
29 | copy_page = copy_page_nommu; | ||
30 | clear_page = clear_page_nommu; | ||
31 | |||
32 | return 0; | ||
33 | } | ||
34 | |||
35 | subsys_initcall(pg_nommu_init); | ||
36 | |||
diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c new file mode 100644 index 000000000000..e5907c7330e5 --- /dev/null +++ b/arch/sh/mm/pg-sh4.c | |||
@@ -0,0 +1,122 @@ | |||
1 | /* | ||
2 | * arch/sh/mm/pg-sh4.c | ||
3 | * | ||
4 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka | ||
5 | * Copyright (C) 2002 Paul Mundt | ||
6 | * | ||
7 | * Released under the terms of the GNU GPL v2.0. | ||
8 | */ | ||
9 | #include <linux/config.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/mman.h> | ||
12 | #include <linux/mm.h> | ||
13 | #include <linux/threads.h> | ||
14 | #include <asm/addrspace.h> | ||
15 | #include <asm/page.h> | ||
16 | #include <asm/pgtable.h> | ||
17 | #include <asm/processor.h> | ||
18 | #include <asm/cache.h> | ||
19 | #include <asm/io.h> | ||
20 | #include <asm/uaccess.h> | ||
21 | #include <asm/pgalloc.h> | ||
22 | #include <asm/mmu_context.h> | ||
23 | #include <asm/cacheflush.h> | ||
24 | |||
25 | extern struct semaphore p3map_sem[]; | ||
26 | |||
27 | /* | ||
28 | * clear_user_page | ||
29 | * @to: P1 address | ||
30 | * @address: U0 address to be mapped | ||
31 | * @page: page (virt_to_page(to)) | ||
32 | */ | ||
33 | void clear_user_page(void *to, unsigned long address, struct page *page) | ||
34 | { | ||
35 | __set_bit(PG_mapped, &page->flags); | ||
36 | if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) | ||
37 | clear_page(to); | ||
38 | else { | ||
39 | pgprot_t pgprot = __pgprot(_PAGE_PRESENT | | ||
40 | _PAGE_RW | _PAGE_CACHABLE | | ||
41 | _PAGE_DIRTY | _PAGE_ACCESSED | | ||
42 | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD); | ||
43 | unsigned long phys_addr = PHYSADDR(to); | ||
44 | unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS); | ||
45 | pgd_t *dir = pgd_offset_k(p3_addr); | ||
46 | pmd_t *pmd = pmd_offset(dir, p3_addr); | ||
47 | pte_t *pte = pte_offset_kernel(pmd, p3_addr); | ||
48 | pte_t entry; | ||
49 | unsigned long flags; | ||
50 | |||
51 | entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot); | ||
52 | down(&p3map_sem[(address & CACHE_ALIAS)>>12]); | ||
53 | set_pte(pte, entry); | ||
54 | local_irq_save(flags); | ||
55 | __flush_tlb_page(get_asid(), p3_addr); | ||
56 | local_irq_restore(flags); | ||
57 | update_mmu_cache(NULL, p3_addr, entry); | ||
58 | __clear_user_page((void *)p3_addr, to); | ||
59 | pte_clear(&init_mm, p3_addr, pte); | ||
60 | up(&p3map_sem[(address & CACHE_ALIAS)>>12]); | ||
61 | } | ||
62 | } | ||
63 | |||
64 | /* | ||
65 | * copy_user_page | ||
66 | * @to: P1 address | ||
67 | * @from: P1 address | ||
68 | * @address: U0 address to be mapped | ||
69 | * @page: page (virt_to_page(to)) | ||
70 | */ | ||
71 | void copy_user_page(void *to, void *from, unsigned long address, | ||
72 | struct page *page) | ||
73 | { | ||
74 | __set_bit(PG_mapped, &page->flags); | ||
75 | if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) | ||
76 | copy_page(to, from); | ||
77 | else { | ||
78 | pgprot_t pgprot = __pgprot(_PAGE_PRESENT | | ||
79 | _PAGE_RW | _PAGE_CACHABLE | | ||
80 | _PAGE_DIRTY | _PAGE_ACCESSED | | ||
81 | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD); | ||
82 | unsigned long phys_addr = PHYSADDR(to); | ||
83 | unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS); | ||
84 | pgd_t *dir = pgd_offset_k(p3_addr); | ||
85 | pmd_t *pmd = pmd_offset(dir, p3_addr); | ||
86 | pte_t *pte = pte_offset_kernel(pmd, p3_addr); | ||
87 | pte_t entry; | ||
88 | unsigned long flags; | ||
89 | |||
90 | entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot); | ||
91 | down(&p3map_sem[(address & CACHE_ALIAS)>>12]); | ||
92 | set_pte(pte, entry); | ||
93 | local_irq_save(flags); | ||
94 | __flush_tlb_page(get_asid(), p3_addr); | ||
95 | local_irq_restore(flags); | ||
96 | update_mmu_cache(NULL, p3_addr, entry); | ||
97 | __copy_user_page((void *)p3_addr, from, to); | ||
98 | pte_clear(&init_mm, p3_addr, pte); | ||
99 | up(&p3map_sem[(address & CACHE_ALIAS)>>12]); | ||
100 | } | ||
101 | } | ||
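
The (address ^ to) & CACHE_ALIAS test in both routines above is the crux: it checks whether the kernel and user views of the page land on the same cache colour, and only remaps through P3 when they don't. A stand-alone illustration -- CACHE_ALIAS == 0x3000 assumes 16 kB of per-way cache over 4 kB pages, and both addresses are made up:

    #include <stdio.h>

    /* Assumed value for a 16 kB per-way cache with 4 kB pages. */
    #define CACHE_ALIAS 0x3000UL
    #define P3SEG       0xc0000000UL

    int main(void)
    {
            unsigned long kaddr = 0x8c201000UL; /* hypothetical P1 'to'   */
            unsigned long uaddr = 0x00403000UL; /* hypothetical U0 target */

            if (((uaddr ^ kaddr) & CACHE_ALIAS) == 0)
                    printf("same cache colour: plain copy is safe\n");
            else
                    printf("different colour: remap via P3 at %#lx\n",
                           P3SEG + (uaddr & CACHE_ALIAS));

            return 0;
    }
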
102 | |||
103 | /* | ||
104 | * For SH-4, we have our own implementation for ptep_get_and_clear | ||
105 | */ | ||
106 | inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | ||
107 | { | ||
108 | pte_t pte = *ptep; | ||
109 | |||
110 | pte_clear(mm, addr, ptep); | ||
111 | if (!pte_not_present(pte)) { | ||
112 | unsigned long pfn = pte_pfn(pte); | ||
113 | if (pfn_valid(pfn)) { | ||
114 | struct page *page = pfn_to_page(pfn); | ||
115 | struct address_space *mapping = page_mapping(page); | ||
116 | if (!mapping || !mapping_writably_mapped(mapping)) | ||
117 | __clear_bit(PG_mapped, &page->flags); | ||
118 | } | ||
119 | } | ||
120 | return pte; | ||
121 | } | ||
122 | |||
diff --git a/arch/sh/mm/pg-sh7705.c b/arch/sh/mm/pg-sh7705.c new file mode 100644 index 000000000000..ff9ece986cbc --- /dev/null +++ b/arch/sh/mm/pg-sh7705.c | |||
@@ -0,0 +1,137 @@ | |||
1 | /* | ||
2 | * arch/sh/mm/pg-sh7705.c | ||
3 | * | ||
4 | * Copyright (C) 1999, 2000 Niibe Yutaka | ||
5 | * Copyright (C) 2004 Alex Song | ||
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | #include <linux/init.h> | ||
14 | #include <linux/mman.h> | ||
15 | #include <linux/mm.h> | ||
16 | #include <linux/threads.h> | ||
17 | #include <asm/addrspace.h> | ||
18 | #include <asm/page.h> | ||
19 | #include <asm/pgtable.h> | ||
20 | #include <asm/processor.h> | ||
21 | #include <asm/cache.h> | ||
22 | #include <asm/io.h> | ||
23 | #include <asm/uaccess.h> | ||
24 | #include <asm/pgalloc.h> | ||
25 | #include <asm/mmu_context.h> | ||
26 | #include <asm/cacheflush.h> | ||
27 | |||
28 | static inline void __flush_purge_virtual_region(void *p1, void *virt, int size) | ||
29 | { | ||
30 | unsigned long v; | ||
31 | unsigned long begin, end; | ||
32 | unsigned long p1_begin; | ||
33 | |||
34 | |||
35 | begin = L1_CACHE_ALIGN((unsigned long)virt); | ||
36 | end = L1_CACHE_ALIGN((unsigned long)virt + size); | ||
37 | |||
38 | p1_begin = (unsigned long)p1 & ~(L1_CACHE_BYTES - 1); | ||
39 | |||
40 | /* do this the slow way as we may not have TLB entries | ||
41 | * for virt yet. */ | ||
42 | for (v = begin; v < end; v += L1_CACHE_BYTES) { | ||
43 | unsigned long p; | ||
44 | unsigned long ways, addr; | ||
45 | |||
46 | p = __pa(p1_begin); | ||
47 | |||
48 | ways = cpu_data->dcache.ways; | ||
49 | addr = CACHE_OC_ADDRESS_ARRAY; | ||
50 | |||
51 | do { | ||
52 | unsigned long data; | ||
53 | |||
54 | addr |= (v & cpu_data->dcache.entry_mask); | ||
55 | |||
56 | data = ctrl_inl(addr); | ||
57 | if ((data & CACHE_PHYSADDR_MASK) == | ||
58 | (p & CACHE_PHYSADDR_MASK)) { | ||
59 | data &= ~(SH_CACHE_UPDATED|SH_CACHE_VALID); | ||
60 | ctrl_outl(data, addr); | ||
61 | } | ||
62 | |||
63 | addr += cpu_data->dcache.way_incr; | ||
64 | } while (--ways); | ||
65 | |||
66 | p1_begin += L1_CACHE_BYTES; | ||
67 | } | ||
68 | } | ||
69 | |||
70 | /* | ||
71 | * clear_user_page | ||
72 | * @to: P1 address | ||
73 | * @address: U0 address to be mapped | ||
74 | */ | ||
75 | void clear_user_page(void *to, unsigned long address, struct page *pg) | ||
76 | { | ||
77 | struct page *page = virt_to_page(to); | ||
78 | |||
79 | __set_bit(PG_mapped, &page->flags); | ||
80 | if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) { | ||
81 | clear_page(to); | ||
82 | __flush_wback_region(to, PAGE_SIZE); | ||
83 | } else { | ||
84 | __flush_purge_virtual_region(to, | ||
85 | (void *)(address & 0xfffff000), | ||
86 | PAGE_SIZE); | ||
87 | clear_page(to); | ||
88 | __flush_wback_region(to, PAGE_SIZE); | ||
89 | } | ||
90 | } | ||
91 | |||
92 | /* | ||
93 | * copy_user_page | ||
94 | * @to: P1 address | ||
95 | * @from: P1 address | ||
96 | * @address: U0 address to be mapped | ||
97 | */ | ||
98 | void copy_user_page(void *to, void *from, unsigned long address, struct page *pg) | ||
99 | { | ||
100 | struct page *page = virt_to_page(to); | ||
101 | |||
102 | |||
103 | __set_bit(PG_mapped, &page->flags); | ||
104 | if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) { | ||
105 | copy_page(to, from); | ||
106 | __flush_wback_region(to, PAGE_SIZE); | ||
107 | } else { | ||
108 | __flush_purge_virtual_region(to, | ||
109 | (void *)(address & 0xfffff000), | ||
110 | PAGE_SIZE); | ||
111 | copy_page(to, from); | ||
112 | __flush_wback_region(to, PAGE_SIZE); | ||
113 | } | ||
114 | } | ||
115 | |||
116 | /* | ||
117 | * For SH7705, we have our own implementation for ptep_get_and_clear | ||
118 | * Copied from pg-sh4.c | ||
119 | */ | ||
120 | inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | ||
121 | { | ||
122 | pte_t pte = *ptep; | ||
123 | |||
124 | pte_clear(mm, addr, ptep); | ||
125 | if (!pte_not_present(pte)) { | ||
126 | unsigned long pfn = pte_pfn(pte); | ||
127 | if (pfn_valid(pfn)) { | ||
128 | struct page *page = pfn_to_page(pfn); | ||
129 | struct address_space *mapping = page_mapping(page); | ||
130 | if (!mapping || !mapping_writably_mapped(mapping)) | ||
131 | __clear_bit(PG_mapped, &page->flags); | ||
132 | } | ||
133 | } | ||
134 | |||
135 | return pte; | ||
136 | } | ||
137 | |||
diff --git a/arch/sh/mm/tlb-nommu.c b/arch/sh/mm/tlb-nommu.c new file mode 100644 index 000000000000..e55cfea01092 --- /dev/null +++ b/arch/sh/mm/tlb-nommu.c | |||
@@ -0,0 +1,58 @@ | |||
1 | /* | ||
2 | * arch/sh/mm/tlb-nommu.c | ||
3 | * | ||
4 | * TLB Operations for MMUless SH. | ||
5 | * | ||
6 | * Copyright (C) 2002 Paul Mundt | ||
7 | * | ||
8 | * Released under the terms of the GNU GPL v2.0. | ||
9 | */ | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/mm.h> | ||
12 | |||
13 | /* | ||
14 | * Nothing too terribly exciting here .. | ||
15 | */ | ||
16 | |||
17 | void flush_tlb(void) | ||
18 | { | ||
19 | BUG(); | ||
20 | } | ||
21 | |||
22 | void flush_tlb_all(void) | ||
23 | { | ||
24 | BUG(); | ||
25 | } | ||
26 | |||
27 | void flush_tlb_mm(struct mm_struct *mm) | ||
28 | { | ||
29 | BUG(); | ||
30 | } | ||
31 | |||
32 | void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | ||
33 | unsigned long end) | ||
34 | { | ||
35 | BUG(); | ||
36 | } | ||
37 | |||
38 | void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) | ||
39 | { | ||
40 | BUG(); | ||
41 | } | ||
42 | |||
43 | void __flush_tlb_page(unsigned long asid, unsigned long page) | ||
44 | { | ||
45 | BUG(); | ||
46 | } | ||
47 | |||
48 | void flush_tlb_kernel_range(unsigned long start, unsigned long end) | ||
49 | { | ||
50 | BUG(); | ||
51 | } | ||
52 | |||
53 | void update_mmu_cache(struct vm_area_struct * vma, | ||
54 | unsigned long address, pte_t pte) | ||
55 | { | ||
56 | BUG(); | ||
57 | } | ||
58 | |||
diff --git a/arch/sh/mm/tlb-sh3.c b/arch/sh/mm/tlb-sh3.c new file mode 100644 index 000000000000..7a0d5c10bf20 --- /dev/null +++ b/arch/sh/mm/tlb-sh3.c | |||
@@ -0,0 +1,92 @@ | |||
1 | /* | ||
2 | * arch/sh/mm/tlb-sh3.c | ||
3 | * | ||
4 | * SH-3 specific TLB operations | ||
5 | * | ||
6 | * Copyright (C) 1999 Niibe Yutaka | ||
7 | * Copyright (C) 2002 Paul Mundt | ||
8 | * | ||
9 | * Released under the terms of the GNU GPL v2.0. | ||
10 | */ | ||
11 | #include <linux/signal.h> | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/errno.h> | ||
15 | #include <linux/string.h> | ||
16 | #include <linux/types.h> | ||
17 | #include <linux/ptrace.h> | ||
18 | #include <linux/mman.h> | ||
19 | #include <linux/mm.h> | ||
20 | #include <linux/smp.h> | ||
21 | #include <linux/smp_lock.h> | ||
22 | #include <linux/interrupt.h> | ||
23 | |||
24 | #include <asm/system.h> | ||
25 | #include <asm/io.h> | ||
26 | #include <asm/uaccess.h> | ||
27 | #include <asm/pgalloc.h> | ||
28 | #include <asm/mmu_context.h> | ||
29 | #include <asm/cacheflush.h> | ||
30 | |||
31 | void update_mmu_cache(struct vm_area_struct * vma, | ||
32 | unsigned long address, pte_t pte) | ||
33 | { | ||
34 | unsigned long flags; | ||
35 | unsigned long pteval; | ||
36 | unsigned long vpn; | ||
37 | |||
38 | /* Ptrace may call this routine. */ | ||
39 | if (vma && current->active_mm != vma->vm_mm) | ||
40 | return; | ||
41 | |||
42 | #if defined(CONFIG_SH7705_CACHE_32KB) | ||
43 | struct page *page; | ||
44 | page = pte_page(pte); | ||
45 | if (VALID_PAGE(page) && !test_bit(PG_mapped, &page->flags)) { | ||
46 | unsigned long phys = pte_val(pte) & PTE_PHYS_MASK; | ||
47 | __flush_wback_region((void *)P1SEGADDR(phys), PAGE_SIZE); | ||
48 | __set_bit(PG_mapped, &page->flags); | ||
49 | } | ||
50 | #endif | ||
51 | |||
52 | local_irq_save(flags); | ||
53 | |||
54 | /* Set PTEH register */ | ||
55 | vpn = (address & MMU_VPN_MASK) | get_asid(); | ||
56 | ctrl_outl(vpn, MMU_PTEH); | ||
57 | |||
58 | pteval = pte_val(pte); | ||
59 | |||
60 | /* Set PTEL register */ | ||
61 | pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */ | ||
62 | /* conveniently, we want all the software flags to be 0 anyway */ | ||
63 | ctrl_outl(pteval, MMU_PTEL); | ||
64 | |||
65 | /* Load the TLB */ | ||
66 | asm volatile("ldtlb": /* no output */ : /* no input */ : "memory"); | ||
67 | local_irq_restore(flags); | ||
68 | } | ||
69 | |||
70 | void __flush_tlb_page(unsigned long asid, unsigned long page) | ||
71 | { | ||
72 | unsigned long addr, data; | ||
73 | int i, ways = MMU_NTLB_WAYS; | ||
74 | |||
75 | /* | ||
76 | * NOTE: PTEH.ASID should be set to this MM | ||
77 | * _AND_ we need to write ASID to the array. | ||
78 | * | ||
79 | * It would be simple if we didn't need to set PTEH.ASID... | ||
80 | */ | ||
81 | addr = MMU_TLB_ADDRESS_ARRAY | (page & 0x1F000); | ||
82 | data = (page & 0xfffe0000) | asid; /* VALID bit is off */ | ||
83 | |||
84 | if ((cpu_data->flags & CPU_HAS_MMU_PAGE_ASSOC)) { | ||
85 | addr |= MMU_PAGE_ASSOC_BIT; | ||
86 | ways = 1; /* we already know the way .. */ | ||
87 | } | ||
88 | |||
89 | for (i = 0; i < ways; i++) | ||
90 | ctrl_outl(data, addr + (i << 8)); | ||
91 | } | ||
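
To make the address-array encoding above concrete, a stand-alone worked example -- the VPN and ASID are arbitrary, and taking MMU_TLB_ADDRESS_ARRAY as 0xF2000000 is an assumption; check the datasheet for the actual part:

    #include <stdio.h>

    int main(void)
    {
            unsigned long page = 0x00403000UL, asid = 0x12UL;

            /* Entry index comes from page bits 16:12; the VPN tag and
             * ASID go in the data word with the VALID bit left clear. */
            unsigned long addr = 0xF2000000UL | (page & 0x1F000);
            unsigned long data = (page & 0xfffe0000UL) | asid;
            int way;

            /* No page-assoc support assumed, so all four ways are
             * written; the way is selected by address bits 9:8. */
            for (way = 0; way < 4; way++)
                    printf("write %#010lx -> %#010lx\n",
                           data, addr + (way << 8));

            return 0;
    }
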
92 | |||
diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c new file mode 100644 index 000000000000..115b1b6be40b --- /dev/null +++ b/arch/sh/mm/tlb-sh4.c | |||
@@ -0,0 +1,96 @@ | |||
1 | /* | ||
2 | * arch/sh/mm/tlb-sh4.c | ||
3 | * | ||
4 | * SH-4 specific TLB operations | ||
5 | * | ||
6 | * Copyright (C) 1999 Niibe Yutaka | ||
7 | * Copyright (C) 2002 Paul Mundt | ||
8 | * | ||
9 | * Released under the terms of the GNU GPL v2.0. | ||
10 | */ | ||
11 | #include <linux/signal.h> | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/errno.h> | ||
15 | #include <linux/string.h> | ||
16 | #include <linux/types.h> | ||
17 | #include <linux/ptrace.h> | ||
18 | #include <linux/mman.h> | ||
19 | #include <linux/mm.h> | ||
20 | #include <linux/smp.h> | ||
21 | #include <linux/smp_lock.h> | ||
22 | #include <linux/interrupt.h> | ||
23 | |||
24 | #include <asm/system.h> | ||
25 | #include <asm/io.h> | ||
26 | #include <asm/uaccess.h> | ||
27 | #include <asm/pgalloc.h> | ||
28 | #include <asm/mmu_context.h> | ||
29 | #include <asm/cacheflush.h> | ||
30 | |||
31 | void update_mmu_cache(struct vm_area_struct * vma, | ||
32 | unsigned long address, pte_t pte) | ||
33 | { | ||
34 | unsigned long flags; | ||
35 | unsigned long pteval; | ||
36 | unsigned long vpn; | ||
37 | struct page *page; | ||
38 | unsigned long pfn; | ||
39 | unsigned long ptea; | ||
40 | |||
41 | /* Ptrace may call this routine. */ | ||
42 | if (vma && current->active_mm != vma->vm_mm) | ||
43 | return; | ||
44 | |||
45 | pfn = pte_pfn(pte); | ||
46 | if (pfn_valid(pfn)) { | ||
47 | page = pfn_to_page(pfn); | ||
48 | if (!test_bit(PG_mapped, &page->flags)) { | ||
49 | unsigned long phys = pte_val(pte) & PTE_PHYS_MASK; | ||
50 | __flush_wback_region((void *)P1SEGADDR(phys), PAGE_SIZE); | ||
51 | __set_bit(PG_mapped, &page->flags); | ||
52 | } | ||
53 | } | ||
54 | |||
55 | local_irq_save(flags); | ||
56 | |||
57 | /* Set PTEH register */ | ||
58 | vpn = (address & MMU_VPN_MASK) | get_asid(); | ||
59 | ctrl_outl(vpn, MMU_PTEH); | ||
60 | |||
61 | pteval = pte_val(pte); | ||
62 | /* Set PTEA register */ | ||
63 | /* TODO: make this look less hacky */ | ||
64 | ptea = ((pteval >> 28) & 0xe) | (pteval & 0x1); | ||
65 | ctrl_outl(ptea, MMU_PTEA); | ||
66 | |||
67 | /* Set PTEL register */ | ||
68 | pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */ | ||
69 | #ifdef CONFIG_SH_WRITETHROUGH | ||
70 | pteval |= _PAGE_WT; | ||
71 | #endif | ||
72 | /* conveniently, we want all the software flags to be 0 anyway */ | ||
73 | ctrl_outl(pteval, MMU_PTEL); | ||
74 | |||
75 | /* Load the TLB */ | ||
76 | asm volatile("ldtlb": /* no output */ : /* no input */ : "memory"); | ||
77 | local_irq_restore(flags); | ||
78 | } | ||
79 | |||
80 | void __flush_tlb_page(unsigned long asid, unsigned long page) | ||
81 | { | ||
82 | unsigned long addr, data; | ||
83 | |||
84 | /* | ||
85 | * NOTE: PTEH.ASID should be set to this MM | ||
86 | * _AND_ we need to write ASID to the array. | ||
87 | * | ||
88 | * It would be simple if we didn't need to set PTEH.ASID... | ||
89 | */ | ||
90 | addr = MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT; | ||
91 | data = page | asid; /* VALID bit is off */ | ||
92 | jump_to_P2(); | ||
93 | ctrl_outl(data, addr); | ||
94 | back_to_P1(); | ||
95 | } | ||
96 | |||