author		Paul Mundt <lethal@linux-sh.org>	2006-09-27 02:13:36 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2006-09-27 02:13:36 -0400
commit		26ff6c11ef38e08990c1e417c299246e6ab18ff7 (patch)
tree		ebd37fd0270b7c7dfe8474a046663db78fcdb1ab
parent		9359e757709a211040e4b0151eae69248e7c6eca (diff)
sh: page table alloc cleanups and page fault optimizations.
Cleanup of page table allocators, using generic folded PMD and PUD
helpers. TLB flushing operations are moved to a more sensible spot.

The page fault handler is also optimized slightly; we no longer waste
cycles on IRQ disabling for flushing of the page from the ITLB, since
we're already under CLI protection by the initial exception handler.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
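The "generic folded PMD and PUD helpers" are the ones pulled in by
<asm-generic/pgtable-nopmd.h> (see the pgtable.h hunk below): sh keeps
its two-level hardware layout while the code walks a nominal
multi-level tree. A minimal sketch of the kernel-address walk the
fault path performs after this patch (helper names are the generic
pgtable API already used in the diff, not new code):

	pgd_t *pgd = pgd_offset_k(address);
	/*
	 * With the PMD (and PUD) folded, both of these calls collapse
	 * at compile time: pud_offset() hands back the pgd entry and
	 * pmd_offset() the pud entry, so the walk still costs only a
	 * two-level lookup.
	 */
	pud_t *pud = pud_offset(pgd, address);
	pmd_t *pmd = pmd_offset(pud, address);
	pte_t *pte = pte_offset_kernel(pmd, address);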
-rw-r--r--	arch/sh/kernel/sys_sh.c			|   2
-rw-r--r--	arch/sh/mm/Makefile			|   2
-rw-r--r--	arch/sh/mm/consistent.c			|   2
-rw-r--r--	arch/sh/mm/fault.c			| 202
-rw-r--r--	arch/sh/mm/init.c			|  13
-rw-r--r--	arch/sh/mm/tlb-flush.c			| 132
-rw-r--r--	include/asm-sh/cache.h			|   8
-rw-r--r--	include/asm-sh/cacheflush.h		|   1
-rw-r--r--	include/asm-sh/cpu-sh3/cacheflush.h	|   8
-rw-r--r--	include/asm-sh/cpu-sh4/cacheflush.h	|  29
-rw-r--r--	include/asm-sh/page.h			|  12
-rw-r--r--	include/asm-sh/pgalloc.h		|  37
-rw-r--r--	include/asm-sh/pgtable.h		|  80
13 files changed, 261 insertions(+), 267 deletions(-)
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
index 917b2f32f260..d8bcd8a22327 100644
--- a/arch/sh/kernel/sys_sh.c
+++ b/arch/sh/kernel/sys_sh.c
@@ -21,7 +21,7 @@
 #include <linux/mman.h>
 #include <linux/file.h>
 #include <linux/utsname.h>
-
+#include <asm/cacheflush.h>
 #include <asm/uaccess.h>
 #include <asm/ipc.h>
 
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index f4e32b3d24dc..d90906367c5f 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -12,7 +12,7 @@ obj-$(CONFIG_DMA_PAGE_OPS) += pg-dma.o
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 
 mmu-y			:= fault-nommu.o tlb-nommu.o pg-nommu.o
-mmu-$(CONFIG_MMU)	:= fault.o clear_page.o copy_page.o
+mmu-$(CONFIG_MMU)	:= fault.o clear_page.o copy_page.o tlb-flush.o
 
 obj-y			+= $(mmu-y)
 
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index ee73e30263af..c81e6b67ad30 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -9,6 +9,8 @@
  */
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+#include <asm/addrspace.h>
 #include <asm/io.h>
 
 void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle)
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 364181f27b79..7a03ffe6dadd 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -1,33 +1,20 @@
-/* $Id: fault.c,v 1.14 2004/01/13 05:52:11 kkojima Exp $
+/*
+ * Page fault handler for SH with an MMU.
  *
- *  linux/arch/sh/mm/fault.c
  * Copyright (C) 1999  Niibe Yutaka
  * Copyright (C) 2003  Paul Mundt
  *
  * Based on linux/arch/i386/mm/fault.c:
  *  Copyright (C) 1995  Linus Torvalds
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
  */
-
-#include <linux/signal.h>
-#include <linux/sched.h>
 #include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
 #include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-
 #include <asm/system.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
-#include <asm/cacheflush.h>
 #include <asm/kgdb.h>
 
 extern void die(const char *,struct pt_regs *,long);
@@ -187,14 +174,25 @@ do_sigbus:
 	goto no_context;
 }
 
+#ifdef CONFIG_SH_STORE_QUEUES
 /*
- * Called with interrupt disabled.
+ * This is a special case for the SH-4 store queues, as pages for this
+ * space still need to be faulted in before it's possible to flush the
+ * store queue cache for writeout to the remapped region.
+ */
+#define P3_ADDR_MAX		(P4SEG_STORE_QUE + 0x04000000)
+#else
+#define P3_ADDR_MAX		P4SEG
+#endif
+
+/*
+ * Called with interrupts disabled.
  */
 asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 			       unsigned long address)
 {
-	unsigned long addrmax = P4SEG;
 	pgd_t *pgd;
+	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
 	pte_t entry;
@@ -207,31 +205,36 @@ asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 	kgdb_bus_err_hook();
 #endif
 
-#ifdef CONFIG_SH_STORE_QUEUES
-	addrmax = P4SEG_STORE_QUE + 0x04000000;
-#endif
-
-	if (address >= P3SEG && address < addrmax) {
+	/*
+	 * We don't take page faults for P1, P2, and parts of P4, these
+	 * are always mapped, whether it be due to legacy behaviour in
+	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
+	 */
+	if (address >= P3SEG && address < P3_ADDR_MAX)
 		pgd = pgd_offset_k(address);
-		mm = NULL;
-	} else if (address >= TASK_SIZE)
-		return 1;
-	else if (!(mm = current->mm))
-		return 1;
-	else
-		pgd = pgd_offset(mm, address);
+	else {
+		if (unlikely(address >= TASK_SIZE || !current->mm))
+			return 1;
+
+		pgd = pgd_offset(current->mm, address);
+	}
 
-	pmd = pmd_offset(pgd, address);
+	pud = pud_offset(pgd, address);
+	if (pud_none_or_clear_bad(pud))
+		return 1;
+	pmd = pmd_offset(pud, address);
 	if (pmd_none_or_clear_bad(pmd))
 		return 1;
+
 	if (mm)
 		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 	else
 		pte = pte_offset_kernel(pmd, address);
 
 	entry = *pte;
-	if (pte_none(entry) || pte_not_present(entry)
-	    || (writeaccess && !pte_write(entry)))
+	if (unlikely(pte_none(entry) || pte_not_present(entry)))
+		goto unlock;
+	if (unlikely(writeaccess && !pte_write(entry)))
 		goto unlock;
 
 	if (writeaccess)
@@ -243,13 +246,7 @@ asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 	 * ITLB is not affected by "ldtlb" instruction.
 	 * So, we need to flush the entry by ourselves.
 	 */
-
-	{
-		unsigned long flags;
-		local_irq_save(flags);
-		__flush_tlb_page(get_asid(), address&PAGE_MASK);
-		local_irq_restore(flags);
-	}
+	__flush_tlb_page(get_asid(), address & PAGE_MASK);
 #endif
 
 	set_pte(pte, entry);
@@ -260,122 +257,3 @@ unlock:
 	pte_unmap_unlock(pte, ptl);
 	return ret;
 }
-
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
-{
-	if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) {
-		unsigned long flags;
-		unsigned long asid;
-		unsigned long saved_asid = MMU_NO_ASID;
-
-		asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK;
-		page &= PAGE_MASK;
-
-		local_irq_save(flags);
-		if (vma->vm_mm != current->mm) {
-			saved_asid = get_asid();
-			set_asid(asid);
-		}
-		__flush_tlb_page(asid, page);
-		if (saved_asid != MMU_NO_ASID)
-			set_asid(saved_asid);
-		local_irq_restore(flags);
-	}
-}
-
-void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-		     unsigned long end)
-{
-	struct mm_struct *mm = vma->vm_mm;
-
-	if (mm->context != NO_CONTEXT) {
-		unsigned long flags;
-		int size;
-
-		local_irq_save(flags);
-		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-		if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
-			mm->context = NO_CONTEXT;
-			if (mm == current->mm)
-				activate_context(mm);
-		} else {
-			unsigned long asid = mm->context&MMU_CONTEXT_ASID_MASK;
-			unsigned long saved_asid = MMU_NO_ASID;
-
-			start &= PAGE_MASK;
-			end += (PAGE_SIZE - 1);
-			end &= PAGE_MASK;
-			if (mm != current->mm) {
-				saved_asid = get_asid();
-				set_asid(asid);
-			}
-			while (start < end) {
-				__flush_tlb_page(asid, start);
-				start += PAGE_SIZE;
-			}
-			if (saved_asid != MMU_NO_ASID)
-				set_asid(saved_asid);
-		}
-		local_irq_restore(flags);
-	}
-}
-
-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
-	unsigned long flags;
-	int size;
-
-	local_irq_save(flags);
-	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-	if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
-		flush_tlb_all();
-	} else {
-		unsigned long asid = init_mm.context&MMU_CONTEXT_ASID_MASK;
-		unsigned long saved_asid = get_asid();
-
-		start &= PAGE_MASK;
-		end += (PAGE_SIZE - 1);
-		end &= PAGE_MASK;
-		set_asid(asid);
-		while (start < end) {
-			__flush_tlb_page(asid, start);
-			start += PAGE_SIZE;
-		}
-		set_asid(saved_asid);
-	}
-	local_irq_restore(flags);
-}
-
-void flush_tlb_mm(struct mm_struct *mm)
-{
-	/* Invalidate all TLB of this process. */
-	/* Instead of invalidating each TLB, we get new MMU context. */
-	if (mm->context != NO_CONTEXT) {
-		unsigned long flags;
-
-		local_irq_save(flags);
-		mm->context = NO_CONTEXT;
-		if (mm == current->mm)
-			activate_context(mm);
-		local_irq_restore(flags);
-	}
-}
-
-void flush_tlb_all(void)
-{
-	unsigned long flags, status;
-
-	/*
-	 * Flush all the TLB.
-	 *
-	 * Write to the MMU control register's bit:
-	 *	TF-bit for SH-3, TI-bit for SH-4.
-	 *	It's same position, bit #2.
-	 */
-	local_irq_save(flags);
-	status = ctrl_inl(MMUCR);
-	status |= 0x04;
-	ctrl_outl(status, MMUCR);
-	ctrl_barrier();
-	local_irq_restore(flags);
-}
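The ITLB hunk above is the "page fault optimization" from the commit
message: __do_page_fault() is entered from the exception handler with
interrupts already blocked (CLI), so the local_irq_save()/restore()
pair around __flush_tlb_page() bought nothing. For contrast, a hedged
sketch of the pattern a helper would still need if it could be called
with interrupts enabled (flush_user_itlb() is hypothetical, not part
of this patch):

	static void flush_user_itlb(unsigned long address)
	{
		unsigned long flags;

		/*
		 * Callers may run with IRQs on, so build a critical
		 * section around the ITLB flush.
		 */
		local_irq_save(flags);
		__flush_tlb_page(get_asid(), address & PAGE_MASK);
		local_irq_restore(flags);
	}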
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 8ea27ca4b700..d1a979eab656 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -80,6 +80,7 @@ void show_mem(void)
 static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 {
 	pgd_t *pgd;
+	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
 
@@ -89,7 +90,17 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 		return;
 	}
 
-	pmd = pmd_offset(pgd, addr);
+	pud = pud_offset(pgd, addr);
+	if (pud_none(*pud)) {
+		pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
+		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
+		if (pmd != pmd_offset(pud, 0)) {
+			pud_ERROR(*pud);
+			return;
+		}
+	}
+
+	pmd = pmd_offset(pud, addr);
 	if (pmd_none(*pmd)) {
 		pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
 		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
diff --git a/arch/sh/mm/tlb-flush.c b/arch/sh/mm/tlb-flush.c
new file mode 100644
index 000000000000..fd7e42bcaa40
--- /dev/null
+++ b/arch/sh/mm/tlb-flush.c
@@ -0,0 +1,132 @@
+/*
+ * TLB flushing operations for SH with an MMU.
+ *
+ *  Copyright (C) 1999  Niibe Yutaka
+ *  Copyright (C) 2003  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/mm.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+	if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) {
+		unsigned long flags;
+		unsigned long asid;
+		unsigned long saved_asid = MMU_NO_ASID;
+
+		asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK;
+		page &= PAGE_MASK;
+
+		local_irq_save(flags);
+		if (vma->vm_mm != current->mm) {
+			saved_asid = get_asid();
+			set_asid(asid);
+		}
+		__flush_tlb_page(asid, page);
+		if (saved_asid != MMU_NO_ASID)
+			set_asid(saved_asid);
+		local_irq_restore(flags);
+	}
+}
+
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+		     unsigned long end)
+{
+	struct mm_struct *mm = vma->vm_mm;
+
+	if (mm->context != NO_CONTEXT) {
+		unsigned long flags;
+		int size;
+
+		local_irq_save(flags);
+		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+		if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
+			mm->context = NO_CONTEXT;
+			if (mm == current->mm)
+				activate_context(mm);
+		} else {
+			unsigned long asid = mm->context&MMU_CONTEXT_ASID_MASK;
+			unsigned long saved_asid = MMU_NO_ASID;
+
+			start &= PAGE_MASK;
+			end += (PAGE_SIZE - 1);
+			end &= PAGE_MASK;
+			if (mm != current->mm) {
+				saved_asid = get_asid();
+				set_asid(asid);
+			}
+			while (start < end) {
+				__flush_tlb_page(asid, start);
+				start += PAGE_SIZE;
+			}
+			if (saved_asid != MMU_NO_ASID)
+				set_asid(saved_asid);
+		}
+		local_irq_restore(flags);
+	}
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	unsigned long flags;
+	int size;
+
+	local_irq_save(flags);
+	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+	if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
+		flush_tlb_all();
+	} else {
+		unsigned long asid = init_mm.context&MMU_CONTEXT_ASID_MASK;
+		unsigned long saved_asid = get_asid();
+
+		start &= PAGE_MASK;
+		end += (PAGE_SIZE - 1);
+		end &= PAGE_MASK;
+		set_asid(asid);
+		while (start < end) {
+			__flush_tlb_page(asid, start);
+			start += PAGE_SIZE;
+		}
+		set_asid(saved_asid);
+	}
+	local_irq_restore(flags);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+	/* Invalidate all TLB of this process. */
+	/* Instead of invalidating each TLB, we get new MMU context. */
+	if (mm->context != NO_CONTEXT) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		mm->context = NO_CONTEXT;
+		if (mm == current->mm)
+			activate_context(mm);
+		local_irq_restore(flags);
+	}
+}
+
+void flush_tlb_all(void)
+{
+	unsigned long flags, status;
+
+	/*
+	 * Flush all the TLB.
+	 *
+	 * Write to the MMU control register's bit:
+	 *	TF-bit for SH-3, TI-bit for SH-4.
+	 *	It's same position, bit #2.
+	 */
+	local_irq_save(flags);
+	status = ctrl_inl(MMUCR);
+	status |= 0x04;
+	ctrl_outl(status, MMUCR);
+	ctrl_barrier();
+	local_irq_restore(flags);
+}
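A note on the MMU_NTLB_ENTRIES/4 heuristic in flush_tlb_range() and
flush_tlb_kernel_range() above: once a range covers more than a quarter
of the TLB, per-page flushes cost more than simply abandoning the
context; mm->context = NO_CONTEXT makes activate_context() assign a
fresh ASID, so the stale entries are never matched again. Worked
example, assuming a 64-entry TLB (typical for SH-4; the real bound is
whatever MMU_NTLB_ENTRIES is on your CPU):

	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	/*
	 * 64/4 = 16: a 17-page range drops the ASID rather than
	 * issuing 17 __flush_tlb_page() calls.
	 */
	if (size > (MMU_NTLB_ENTRIES/4))
		mm->context = NO_CONTEXT;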
diff --git a/include/asm-sh/cache.h b/include/asm-sh/cache.h
index 33f13367054b..e3a180cf5062 100644
--- a/include/asm-sh/cache.h
+++ b/include/asm-sh/cache.h
@@ -10,7 +10,6 @@
 #ifdef __KERNEL__
 
 #include <asm/cpu/cache.h>
-#include <asm/cpu/cacheflush.h>
 
 #define SH_CACHE_VALID		1
 #define SH_CACHE_UPDATED	2
@@ -49,12 +48,5 @@ struct cache_info {
 	unsigned long flags;
 };
 
-/* Flush (write-back only) a region (smaller than a page) */
-extern void __flush_wback_region(void *start, int size);
-/* Flush (write-back & invalidate) a region (smaller than a page) */
-extern void __flush_purge_region(void *start, int size);
-/* Flush (invalidate only) a region (smaller than a page) */
-extern void __flush_invalidate_region(void *start, int size);
-
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_CACHE_H */
diff --git a/include/asm-sh/cacheflush.h b/include/asm-sh/cacheflush.h
index 9dfb33edb008..92930b4a40d4 100644
--- a/include/asm-sh/cacheflush.h
+++ b/include/asm-sh/cacheflush.h
@@ -2,6 +2,7 @@
 #define __ASM_SH_CACHEFLUSH_H
 #ifdef __KERNEL__
 
+#include <linux/mm.h>
 #include <asm/cpu/cacheflush.h>
 
 /* Flush (write-back only) a region (smaller than a page) */
diff --git a/include/asm-sh/cpu-sh3/cacheflush.h b/include/asm-sh/cpu-sh3/cacheflush.h
index f51aed00c68f..db0cb071ea8e 100644
--- a/include/asm-sh/cpu-sh3/cacheflush.h
+++ b/include/asm-sh/cpu-sh3/cacheflush.h
@@ -10,7 +10,7 @@
 #ifndef __ASM_CPU_SH3_CACHEFLUSH_H
 #define __ASM_CPU_SH3_CACHEFLUSH_H
 
-/* 
+/*
  * Cache flushing:
  *
  *  - flush_cache_all() flushes entire cache
@@ -35,10 +35,6 @@
  /* 32KB cache, 4kb PAGE sizes need to check bit 12 */
 #define CACHE_ALIAS 0x00001000
 
-struct page;
-struct mm_struct;
-struct vm_area_struct;
-
 extern void flush_cache_all(void);
 extern void flush_cache_mm(struct mm_struct *mm);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
@@ -79,8 +75,6 @@ extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
 
 #define p3_cache_init()			do { } while (0)
 
-#define HAVE_ARCH_UNMAPPED_AREA
-
 #endif
 
 #endif /* __ASM_CPU_SH3_CACHEFLUSH_H */
diff --git a/include/asm-sh/cpu-sh4/cacheflush.h b/include/asm-sh/cpu-sh4/cacheflush.h
index ea58c4c5944d..a95fc951aff6 100644
--- a/include/asm-sh/cpu-sh4/cacheflush.h
+++ b/include/asm-sh/cpu-sh4/cacheflush.h
@@ -16,30 +16,26 @@
  *  caching; in which case they're only semi-broken),
  *  so we need them.
  */
-struct page;
-struct mm_struct;
-struct vm_area_struct;
-
-extern void flush_cache_all(void);
-extern void flush_cache_mm(struct mm_struct *mm);
-extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
-			      unsigned long end);
-extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
-extern void flush_dcache_page(struct page *pg);
+void flush_cache_all(void);
+void flush_cache_mm(struct mm_struct *mm);
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+		       unsigned long end);
+void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
+		      unsigned long pfn);
+void flush_dcache_page(struct page *pg);
 
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 
-extern void flush_icache_range(unsigned long start, unsigned long end);
-extern void flush_cache_sigtramp(unsigned long addr);
-extern void flush_icache_user_range(struct vm_area_struct *vma,
-				    struct page *page, unsigned long addr,
-				    int len);
+void flush_icache_range(unsigned long start, unsigned long end);
+void flush_cache_sigtramp(unsigned long addr);
+void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+			     unsigned long addr, int len);
 
 #define flush_icache_page(vma,pg)	do { } while (0)
 
 /* Initialization of P3 area for copy_user_page */
-extern void p3_cache_init(void);
+void p3_cache_init(void);
 
 #define PG_mapped	PG_arch_1
 
@@ -57,4 +53,3 @@ static inline int remap_area_pages(unsigned long addr, unsigned long phys_addr,
 }
 #endif /* CONFIG_MMU */
 #endif /* __ASM_CPU_SH4_CACHEFLUSH_H */
-
diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h
index 4811d410d123..51d7281a546a 100644
--- a/include/asm-sh/page.h
+++ b/include/asm-sh/page.h
@@ -41,7 +41,8 @@ extern void (*copy_page)(void *to, void *from);
 extern void clear_page_slow(void *to);
 extern void copy_page_slow(void *to, void *from);
 
-#if defined(CONFIG_SH7705_CACHE_32KB) && defined(CONFIG_MMU)
+#if defined(CONFIG_MMU) && (defined(CONFIG_CPU_SH4) || \
+	defined(CONFIG_SH7705_CACHE_32KB))
 struct page;
 extern void clear_user_page(void *to, unsigned long address, struct page *pg);
 extern void copy_user_page(void *to, void *from, unsigned long address, struct page *pg);
@@ -50,29 +51,20 @@ extern void __copy_user_page(void *to, void *from, void *orig_to);
 #elif defined(CONFIG_CPU_SH2) || defined(CONFIG_CPU_SH3) || !defined(CONFIG_MMU)
 #define clear_user_page(page, vaddr, pg)	clear_page(page)
 #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
-#elif defined(CONFIG_CPU_SH4)
-struct page;
-extern void clear_user_page(void *to, unsigned long address, struct page *pg);
-extern void copy_user_page(void *to, void *from, unsigned long address, struct page *pg);
-extern void __clear_user_page(void *to, void *orig_to);
-extern void __copy_user_page(void *to, void *from, void *orig_to);
 #endif
 
 /*
  * These are used to make use of C type-checking..
  */
 typedef struct { unsigned long pte; } pte_t;
-typedef struct { unsigned long pmd; } pmd_t;
 typedef struct { unsigned long pgd; } pgd_t;
 typedef struct { unsigned long pgprot; } pgprot_t;
 
 #define pte_val(x)	((x).pte)
-#define pmd_val(x)	((x).pmd)
 #define pgd_val(x)	((x).pgd)
 #define pgprot_val(x)	((x).pgprot)
 
 #define __pte(x) ((pte_t) { (x) } )
-#define __pmd(x) ((pmd_t) { (x) } )
 #define __pgd(x) ((pgd_t) { (x) } )
 #define __pgprot(x)	((pgprot_t) { (x) } )
 
diff --git a/include/asm-sh/pgalloc.h b/include/asm-sh/pgalloc.h
index f4f233f7a4f5..e841465ab4d2 100644
--- a/include/asm-sh/pgalloc.h
+++ b/include/asm-sh/pgalloc.h
@@ -1,15 +1,6 @@
 #ifndef __ASM_SH_PGALLOC_H
 #define __ASM_SH_PGALLOC_H
 
-#include <linux/threads.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-
-#define pgd_quicklist ((unsigned long *)0)
-#define pmd_quicklist ((unsigned long *)0)
-#define pte_quicklist ((unsigned long *)0)
-#define pgtable_cache_size 0L
-
 #define pmd_populate_kernel(mm, pmd, pte) \
 	set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
 
@@ -24,38 +15,24 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
  */
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	unsigned int pgd_size = (USER_PTRS_PER_PGD * sizeof(pgd_t));
-	pgd_t *pgd = (pgd_t *)kmalloc(pgd_size, GFP_KERNEL);
-
-	if (pgd)
-		memset(pgd, 0, pgd_size);
-
-	return pgd;
+	return (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
 }
 
 static inline void pgd_free(pgd_t *pgd)
 {
-	kfree(pgd);
+	free_page((unsigned long)pgd);
 }
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 					  unsigned long address)
 {
-	pte_t *pte;
-
-	pte = (pte_t *) __get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
-
-	return pte;
+	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
 }
 
 static inline struct page *pte_alloc_one(struct mm_struct *mm,
 					 unsigned long address)
 {
-	struct page *pte;
-
-	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
-
-	return pte;
+	return alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
 }
 
 static inline void pte_free_kernel(pte_t *pte)
@@ -75,14 +52,8 @@ static inline void pte_free(struct page *pte)
  * inside the pgd, so has no extra memory associated with it.
  */
 
-#define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
 #define pmd_free(x)			do { } while (0)
 #define __pmd_free_tlb(tlb,x)		do { } while (0)
-#define pgd_populate(mm, pmd, pte)	BUG()
 #define check_pgt_cache()		do { } while (0)
 
-#ifdef CONFIG_CPU_SH4
-#define PG_mapped	PG_arch_1
-#endif
-
 #endif /* __ASM_SH_PGALLOC_H */
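Why pgd_alloc() now takes a whole page: with PTRS_PER_PGD = 1024 (see
the pgtable.h change below) and a 4-byte pgd_t, the pgd works out to
exactly one page under the usual 4 KiB page size, so a single zeroed
page from __get_free_page() backs the whole table, replacing the old
kmalloc() of just the USER_PTRS_PER_PGD slice. Illustrative arithmetic,
assuming 4 KiB pages:

	PTRS_PER_PGD * sizeof(pgd_t) = 1024 * 4 = 4096 = PAGE_SIZE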
diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h
index 40d41a78041e..9728b58f7c13 100644
--- a/include/asm-sh/pgtable.h
+++ b/include/asm-sh/pgtable.h
@@ -1,42 +1,42 @@
1#ifndef __ASM_SH_PGTABLE_H
2#define __ASM_SH_PGTABLE_H
3
4#include <asm-generic/4level-fixup.h>
5
6/* 1/*
2 * This file contains the functions and defines necessary to modify and
3 * use the SuperH page table tree.
4 *
7 * Copyright (C) 1999 Niibe Yutaka 5 * Copyright (C) 1999 Niibe Yutaka
8 * Copyright (C) 2002, 2003, 2004 Paul Mundt 6 * Copyright (C) 2002 - 2005 Paul Mundt
7 *
8 * This file is subject to the terms and conditions of the GNU General
9 * Public License. See the file "COPYING" in the main directory of this
10 * archive for more details.
9 */ 11 */
12#ifndef __ASM_SH_PGTABLE_H
13#define __ASM_SH_PGTABLE_H
10 14
11#include <asm/pgtable-2level.h> 15#include <asm-generic/pgtable-nopmd.h>
16#include <asm/page.h>
17
18#define PTRS_PER_PGD 1024
12 19
13/*
14 * This file contains the functions and defines necessary to modify and use
15 * the SuperH page table tree.
16 */
17#ifndef __ASSEMBLY__ 20#ifndef __ASSEMBLY__
18#include <asm/processor.h>
19#include <asm/addrspace.h> 21#include <asm/addrspace.h>
20#include <asm/fixmap.h> 22#include <asm/fixmap.h>
21#include <linux/threads.h>
22 23
23extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; 24extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
24extern void paging_init(void); 25extern void paging_init(void);
25 26
26/* 27/*
27 * Basically we have the same two-level (which is the logical three level
28 * Linux page table layout folded) page tables as the i386.
29 */
30
31/*
32 * ZERO_PAGE is a global shared page that is always zero: used 28 * ZERO_PAGE is a global shared page that is always zero: used
33 * for zero-mapped memory areas etc.. 29 * for zero-mapped memory areas etc..
34 */ 30 */
35extern unsigned long empty_zero_page[1024]; 31extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
36#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) 32#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
37 33
38#endif /* !__ASSEMBLY__ */ 34#endif /* !__ASSEMBLY__ */
39 35
36/* traditional two-level paging structure */
37#define PGDIR_SHIFT 22
38#define PTRS_PER_PMD 1
39#define PTRS_PER_PTE 1024
40#define PMD_SIZE (1UL << PMD_SHIFT) 40#define PMD_SIZE (1UL << PMD_SHIFT)
41#define PMD_MASK (~(PMD_SIZE-1)) 41#define PMD_MASK (~(PMD_SIZE-1))
42#define PGDIR_SIZE (1UL << PGDIR_SHIFT) 42#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
@@ -47,7 +47,6 @@ extern unsigned long empty_zero_page[1024];
 
 #define PTE_PHYS_MASK	0x1ffff000
 
-#ifndef __ASSEMBLY__
 /*
  * First 1MB map is used by fixed purpose.
  * Currently only 4-enty (16kB) is used (see arch/sh/mm/cache.c)
@@ -65,7 +64,7 @@ extern unsigned long empty_zero_page[1024];
 #define _PAGE_SZ1	0x080  /* SZ1-bit : Size of page (on SH-4) */
 #define _PAGE_PRESENT	0x100  /* V-bit   : page is valid */
 #define _PAGE_PROTNONE	0x200  /* software: if not present  */
-#define _PAGE_ACCESSED 	0x400  /* software: page referenced */
+#define _PAGE_ACCESSED	0x400  /* software: page referenced */
 #define _PAGE_U0_SHARED 0x800  /* software: page is shared in user space */
 
 #define _PAGE_FILE	_PAGE_WT /* software: pagecache or swap? */
@@ -83,7 +82,6 @@ extern unsigned long empty_zero_page[1024];
 #define _PAGE_PCC_ATR8	0x60000000	/* Attribute Memory space, 8 bit bus */
 #define _PAGE_PCC_ATR16	0x60000001	/* Attribute Memory space, 6 bit bus */
 
-
 /* Mask which drop software flags
  * We also drop WT bit since it is used for _PAGE_FILE
  * bit in this implementation.
@@ -115,6 +113,8 @@ extern unsigned long empty_zero_page[1024];
 #define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
 #define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | _PAGE_DIRTY | _PAGE_SHARED)
 
+#ifndef __ASSEMBLY__
+
 #ifdef CONFIG_MMU
 #define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE |_PAGE_ACCESSED | _PAGE_FLAGS_HARD)
 #define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_CACHABLE |_PAGE_ACCESSED | _PAGE_SHARED | _PAGE_FLAGS_HARD)
@@ -137,12 +137,13 @@ extern unsigned long empty_zero_page[1024];
 #define PAGE_KERNEL_PCC	__pgprot(0)
 #endif
 
+#endif /* __ASSEMBLY__ */
+
 /*
  * As i386 and MIPS, SuperH can't do page protection for execute, and
  * considers that the same as a read.  Also, write permissions imply
  * read permissions. This is the closest we can get..
  */
-
 #define __P000	PAGE_NONE
 #define __P001	PAGE_READONLY
 #define __P010	PAGE_COPY
@@ -161,6 +162,26 @@ extern unsigned long empty_zero_page[1024];
 #define __S110	PAGE_SHARED
 #define __S111	PAGE_SHARED
 
+#ifndef __ASSEMBLY__
+
+/*
+ * Certain architectures need to do special things when PTEs
+ * within a page table are directly modified.  Thus, the following
+ * hook is made available.
+ */
+#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
+#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+
+/*
+ * (pmds are folded into pgds so this doesn't get actually called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
+
+#define pte_pfn(x)		((unsigned long)(((x).pte >> PAGE_SHIFT)))
+#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+
 #define pte_none(x)	(!pte_val(x))
 #define pte_present(x)	(pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
 #define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
@@ -171,7 +192,7 @@ extern unsigned long empty_zero_page[1024];
 #define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
 
 #define pages_to_mb(x)	((x) >> (20-PAGE_SHIFT))
-#define pte_page(x) 	phys_to_page(pte_val(x)&PTE_PHYS_MASK)
+#define pte_page(x)	phys_to_page(pte_val(x)&PTE_PHYS_MASK)
 
 /*
  * The following only work if pte_present() is true.
@@ -248,6 +269,11 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define pte_unmap(pte)		do { } while (0)
 #define pte_unmap_nested(pte)	do { } while (0)
 
+#define pte_ERROR(e) \
+	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pgd_ERROR(e) \
+	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
 struct vm_area_struct;
 extern void update_mmu_cache(struct vm_area_struct * vma,
 			     unsigned long address, pte_t pte);
@@ -272,8 +298,6 @@ extern void update_mmu_cache(struct vm_area_struct * vma,
 
 typedef pte_t *pte_addr_t;
 
-#endif /* !__ASSEMBLY__ */
-
 #define kern_addr_valid(addr)	(1)
 
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
@@ -301,5 +325,7 @@ extern pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t
 
 #include <asm-generic/pgtable.h>
 
+#endif /* !__ASSEMBLY__ */
+
 #endif /* __ASM_SH_PAGE_H */
 