author		Linus Torvalds <torvalds@linux-foundation.org>	2012-05-21 13:32:01 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-05-21 13:32:01 -0400
commit		9daeaa370526df1c19eba4780247bb7155541e38 (patch)
tree		5ae2601c26e280e81d753c1fe65453a3b8b1d2a0 /arch/sparc/mm
parent		cb62ab71fe2b16e8203a0f0a2ef4eda23d761338 (diff)
parent		1edc17832d8f49a0263d364c453ea35da0e4e2a6 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next
Pull sparc updates from David Miller:

 1) Kill off support for sun4c and Cypress sun4m chips.  And as a
    result we were able to also kill off that ugly btfixup thing that
    required multi-stage links of the final vmlinux image in the Kbuild
    system.  This should make the kbuild maintainers really happy.

    Thanks a lot to Sam Ravnborg for his tireless efforts to get this
    going.

 2) Convert sparc64 to nobootmem.  I suspect now with sparc32 being a
    lot cleaner, it should be able to fall in line and modernize in
    this area too.

 3) Make sparc32 use generic clockevents, from Tkhai Kirill.

[ I fixed up the BPF rules, and tried to clean up the build rules too.
  But I don't have - or want - a sparc cross-build environment, so the
  BPF rule bug and the related build cleanup was all done with just a
  bare "make -n" pseudo-test.  - Linus ]

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next: (110 commits)
  sparc32: use flushi when run-time patching in per_cpu_patch
  sparc32: fix cpuid_patch run-time patching
  sparc32: drop unused inline functions in srmmu.c
  sparc32: drop unused functions in pgtsrmmu.h
  sparc32,leon: move leon mmu functions to leon_mm.c
  sparc32,leon: remove duplicate definitions in leon.h
  sparc32,leon: remove duplicate UART register definitions
  sparc32,leon: move leon ASI definitions to asi.h
  sparc32: move trap table to a separate file
  sparc64: renamed ttable.S to ttable_64.S
  sparc32: Remove asm/sysen.h header.
  sparc32: Delete asm/smpprim.h
  sparc32: Remove unused empty_bad_page{,_table} declarations.
  sparc32: Kill boot_cpu_id4
  sparc32: Move GET_PROCESSOR*_ID() out of asm/asmmacro.h
  sparc32: Remove completely unused code from asm/cache.h
  sparc32: Add ucmpdi2.o to obj-y instead of lib-y.
  sparc32: add ucmpdi2
  sparc: introduce arch/sparc/Kbuild
  sparc: remove obsolete documentation
  ...
Diffstat (limited to 'arch/sparc/mm')
-rw-r--r--	arch/sparc/mm/Makefile		9
-rw-r--r--	arch/sparc/mm/btfixup.c		328
-rw-r--r--	arch/sparc/mm/fault_32.c	207
-rw-r--r--	arch/sparc/mm/init_32.c		51
-rw-r--r--	arch/sparc/mm/init_64.c		121
-rw-r--r--	arch/sparc/mm/io-unit.c		35
-rw-r--r--	arch/sparc/mm/iommu.c		71
-rw-r--r--	arch/sparc/mm/leon_mm.c		95
-rw-r--r--	arch/sparc/mm/loadmmu.c		43
-rw-r--r--	arch/sparc/mm/nosun4c.c		77
-rw-r--r--	arch/sparc/mm/srmmu.c		1232
-rw-r--r--	arch/sparc/mm/srmmu.h		4
-rw-r--r--	arch/sparc/mm/sun4c.c		2166
-rw-r--r--	arch/sparc/mm/viking.S		1
14 files changed, 523 insertions, 3917 deletions
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
index 301421c11291..69ffd3112fed 100644
--- a/arch/sparc/mm/Makefile
+++ b/arch/sparc/mm/Makefile
@@ -7,8 +7,7 @@ ccflags-y := -Werror
 obj-$(CONFIG_SPARC64)   += ultra.o tlb.o tsb.o gup.o
 obj-y                   += fault_$(BITS).o
 obj-y                   += init_$(BITS).o
-obj-$(CONFIG_SPARC32)   += loadmmu.o
-obj-$(CONFIG_SPARC32)   += extable.o btfixup.o srmmu.o iommu.o io-unit.o
+obj-$(CONFIG_SPARC32)   += extable.o srmmu.o iommu.o io-unit.o
 obj-$(CONFIG_SPARC32)   += hypersparc.o viking.o tsunami.o swift.o
 obj-$(CONFIG_SPARC_LEON)+= leon_mm.o
 
@@ -17,9 +16,3 @@ obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
 
 # Only used by sparc32
 obj-$(CONFIG_HIGHMEM)   += highmem.o
-
-ifdef CONFIG_SMP
-obj-$(CONFIG_SPARC32) += nosun4c.o
-else
-obj-$(CONFIG_SPARC32) += sun4c.o
-endif
diff --git a/arch/sparc/mm/btfixup.c b/arch/sparc/mm/btfixup.c
deleted file mode 100644
index 09d6af22db2d..000000000000
--- a/arch/sparc/mm/btfixup.c
+++ /dev/null
@@ -1,328 +0,0 @@
-/* btfixup.c: Boot time code fixup and relocator, so that
- * we can get rid of most indirect calls to achieve single
- * image sun4c and srmmu kernel.
- *
- * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <asm/btfixup.h>
-#include <asm/page.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/oplib.h>
-#include <asm/cacheflush.h>
-
-#define BTFIXUP_OPTIMIZE_NOP
-#define BTFIXUP_OPTIMIZE_OTHER
-
-extern char *srmmu_name;
-static char version[] __initdata = "Boot time fixup v1.6. 4/Mar/98 Jakub Jelinek (jj@ultra.linux.cz). Patching kernel for ";
-static char str_sun4c[] __initdata = "sun4c\n";
-static char str_srmmu[] __initdata = "srmmu[%s]/";
-static char str_iommu[] __initdata = "iommu\n";
-static char str_iounit[] __initdata = "io-unit\n";
-
-static int visited __initdata = 0;
-extern unsigned int ___btfixup_start[], ___btfixup_end[], __init_begin[], __init_end[], __init_text_end[];
-extern unsigned int _stext[], _end[], __start___ksymtab[], __stop___ksymtab[];
-static char wrong_f[] __initdata = "Trying to set f fixup %p to invalid function %08x\n";
-static char wrong_b[] __initdata = "Trying to set b fixup %p to invalid function %08x\n";
-static char wrong_s[] __initdata = "Trying to set s fixup %p to invalid value %08x\n";
-static char wrong_h[] __initdata = "Trying to set h fixup %p to invalid value %08x\n";
-static char wrong_a[] __initdata = "Trying to set a fixup %p to invalid value %08x\n";
-static char wrong[] __initdata = "Wrong address for %c fixup %p\n";
-static char insn_f[] __initdata = "Fixup f %p refers to weird instructions at %p[%08x,%08x]\n";
-static char insn_b[] __initdata = "Fixup b %p doesn't refer to a SETHI at %p[%08x]\n";
-static char insn_s[] __initdata = "Fixup s %p doesn't refer to an OR at %p[%08x]\n";
-static char insn_h[] __initdata = "Fixup h %p doesn't refer to a SETHI at %p[%08x]\n";
-static char insn_a[] __initdata = "Fixup a %p doesn't refer to a SETHI nor OR at %p[%08x]\n";
-static char insn_i[] __initdata = "Fixup i %p doesn't refer to a valid instruction at %p[%08x]\n";
-static char fca_und[] __initdata = "flush_cache_all undefined in btfixup()\n";
-static char wrong_setaddr[] __initdata = "Garbled CALL/INT patch at %p[%08x,%08x,%08x]=%08x\n";
-
-#ifdef BTFIXUP_OPTIMIZE_OTHER
-static void __init set_addr(unsigned int *addr, unsigned int q1, int fmangled, unsigned int value)
-{
-	if (!fmangled)
-		*addr = value;
-	else {
-		unsigned int *q = (unsigned int *)q1;
-		if (*addr == 0x01000000) {
-			/* Noped */
-			*q = value;
-		} else if (addr[-1] == *q) {
-			/* Moved */
-			addr[-1] = value;
-			*q = value;
-		} else {
-			prom_printf(wrong_setaddr, addr-1, addr[-1], *addr, *q, value);
-			prom_halt();
-		}
-	}
-}
-#else
-static inline void set_addr(unsigned int *addr, unsigned int q1, int fmangled, unsigned int value)
-{
-	*addr = value;
-}
-#endif
-
-void __init btfixup(void)
-{
-	unsigned int *p, *q;
-	int type, count;
-	unsigned insn;
-	unsigned *addr;
-	int fmangled = 0;
-	void (*flush_cacheall)(void);
-
-	if (!visited) {
-		visited++;
-		printk(version);
-		if (ARCH_SUN4C)
-			printk(str_sun4c);
-		else {
-			printk(str_srmmu, srmmu_name);
-			if (sparc_cpu_model == sun4d)
-				printk(str_iounit);
-			else
-				printk(str_iommu);
-		}
-	}
-	for (p = ___btfixup_start; p < ___btfixup_end; ) {
-		count = p[2];
-		q = p + 3;
-		switch (type = *(unsigned char *)p) {
-		case 'f':
-			count = p[3];
-			q = p + 4;
-			if (((p[0] & 1) || p[1])
-			    && ((p[1] & 3) || (unsigned *)(p[1]) < _stext || (unsigned *)(p[1]) >= _end)) {
-				prom_printf(wrong_f, p, p[1]);
-				prom_halt();
-			}
-			break;
-		case 'b':
-			if (p[1] < (unsigned long)__init_begin || p[1] >= (unsigned long)__init_text_end || (p[1] & 3)) {
-				prom_printf(wrong_b, p, p[1]);
-				prom_halt();
-			}
-			break;
-		case 's':
-			if (p[1] + 0x1000 >= 0x2000) {
-				prom_printf(wrong_s, p, p[1]);
-				prom_halt();
-			}
-			break;
-		case 'h':
-			if (p[1] & 0x3ff) {
-				prom_printf(wrong_h, p, p[1]);
-				prom_halt();
-			}
-			break;
-		case 'a':
-			if (p[1] + 0x1000 >= 0x2000 && (p[1] & 0x3ff)) {
-				prom_printf(wrong_a, p, p[1]);
-				prom_halt();
-			}
-			break;
-		}
-		if (p[0] & 1) {
-			p[0] &= ~1;
-			while (count) {
-				fmangled = 0;
-				addr = (unsigned *)*q;
-				if (addr < _stext || addr >= _end) {
-					prom_printf(wrong, type, p);
-					prom_halt();
-				}
-				insn = *addr;
-#ifdef BTFIXUP_OPTIMIZE_OTHER
-				if (type != 'f' && q[1]) {
-					insn = *(unsigned int *)q[1];
-					if (!insn || insn == 1)
-						insn = *addr;
-					else
-						fmangled = 1;
-				}
-#endif
-				switch (type) {
-				case 'f':	/* CALL */
-					if (addr >= __start___ksymtab && addr < __stop___ksymtab) {
-						*addr = p[1];
-						break;
-					} else if (!q[1]) {
-						if ((insn & 0xc1c00000) == 0x01000000) { /* SETHI */
-							*addr = (insn & 0xffc00000) | (p[1] >> 10); break;
-						} else if ((insn & 0xc1f82000) == 0x80102000) { /* OR X, %LO(i), Y */
-							*addr = (insn & 0xffffe000) | (p[1] & 0x3ff); break;
-						} else if ((insn & 0xc0000000) != 0x40000000) { /* !CALL */
-				bad_f:
-							prom_printf(insn_f, p, addr, insn, addr[1]);
-							prom_halt();
-						}
-					} else if (q[1] != 1)
-						addr[1] = q[1];
-					if (p[2] == BTFIXUPCALL_NORM) {
-				norm_f:
-						*addr = 0x40000000 | ((p[1] - (unsigned)addr) >> 2);
-						q[1] = 0;
-						break;
-					}
-#ifndef BTFIXUP_OPTIMIZE_NOP
-					goto norm_f;
-#else
-					if (!(addr[1] & 0x80000000)) {
-						if ((addr[1] & 0xc1c00000) != 0x01000000)	/* !SETHI */
-							goto bad_f; /* CALL, Bicc, FBfcc, CBccc are weird in delay slot, aren't they? */
-					} else {
-						if ((addr[1] & 0x01800000) == 0x01800000) {
-							if ((addr[1] & 0x01f80000) == 0x01e80000) {
-								/* RESTORE */
-								goto norm_f; /* It is dangerous to patch that */
-							}
-							goto bad_f;
-						}
-						if ((addr[1] & 0xffffe003) == 0x9e03e000) {
-							/* ADD %O7, XX, %o7 */
-							int displac = (addr[1] << 19);
-
-							displac = (displac >> 21) + 2;
-							*addr = (0x10800000) + (displac & 0x3fffff);
-							q[1] = addr[1];
-							addr[1] = p[2];
-							break;
-						}
-						if ((addr[1] & 0x201f) == 0x200f || (addr[1] & 0x7c000) == 0x3c000)
-							goto norm_f; /* Someone is playing bad tricks with us: rs1 or rs2 is o7 */
-						if ((addr[1] & 0x3e000000) == 0x1e000000)
-							goto norm_f; /* rd is %o7. We'd better take care. */
-					}
-					if (p[2] == BTFIXUPCALL_NOP) {
-						*addr = 0x01000000;
-						q[1] = 1;
-						break;
-					}
-#ifndef BTFIXUP_OPTIMIZE_OTHER
-					goto norm_f;
-#else
-					if (addr[1] == 0x01000000) {	/* NOP in the delay slot */
-						q[1] = addr[1];
-						*addr = p[2];
-						break;
-					}
-					if ((addr[1] & 0xc0000000) != 0xc0000000) {
-						/* Not a memory operation */
-						if ((addr[1] & 0x30000000) == 0x10000000) {
-							/* Ok, non-memory op with rd %oX */
-							if ((addr[1] & 0x3e000000) == 0x1c000000)
-								goto bad_f; /* Aiee. Someone is playing strange %sp tricks */
-							if ((addr[1] & 0x3e000000) > 0x12000000 ||
-							    ((addr[1] & 0x3e000000) == 0x12000000 &&
-							     p[2] != BTFIXUPCALL_STO1O0 && p[2] != BTFIXUPCALL_SWAPO0O1) ||
-							    ((p[2] & 0xffffe000) == BTFIXUPCALL_RETINT(0))) {
-								/* Nobody uses the result. We can nop it out. */
-								*addr = p[2];
-								q[1] = addr[1];
-								addr[1] = 0x01000000;
-								break;
-							}
-							if ((addr[1] & 0xf1ffffe0) == 0x90100000) {
-								/* MOV %reg, %Ox */
-								if ((addr[1] & 0x3e000000) == 0x10000000 &&
-								    (p[2] & 0x7c000) == 0x20000) {
-									/* Ok, it is call xx; mov reg, %o0 and call optimizes
-									   to doing something on %o0. Patch the patch. */
-									*addr = (p[2] & ~0x7c000) | ((addr[1] & 0x1f) << 14);
-									q[1] = addr[1];
-									addr[1] = 0x01000000;
-									break;
-								}
-								if ((addr[1] & 0x3e000000) == 0x12000000 &&
-								    p[2] == BTFIXUPCALL_STO1O0) {
-									*addr = (p[2] & ~0x3e000000) | ((addr[1] & 0x1f) << 25);
-									q[1] = addr[1];
-									addr[1] = 0x01000000;
-									break;
-								}
-							}
-						}
-					}
-					*addr = addr[1];
-					q[1] = addr[1];
-					addr[1] = p[2];
-					break;
-#endif /* BTFIXUP_OPTIMIZE_OTHER */
-#endif /* BTFIXUP_OPTIMIZE_NOP */
-				case 'b':	/* BLACKBOX */
-					/* Has to be sethi i, xx */
-					if ((insn & 0xc1c00000) != 0x01000000) {
-						prom_printf(insn_b, p, addr, insn);
-						prom_halt();
-					} else {
-						void (*do_fixup)(unsigned *);
-
-						do_fixup = (void (*)(unsigned *))p[1];
-						do_fixup(addr);
-					}
-					break;
-				case 's':	/* SIMM13 */
-					/* Has to be or %g0, i, xx */
-					if ((insn & 0xc1ffe000) != 0x80102000) {
-						prom_printf(insn_s, p, addr, insn);
-						prom_halt();
-					}
-					set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x1fff));
-					break;
-				case 'h':	/* SETHI */
-					/* Has to be sethi i, xx */
-					if ((insn & 0xc1c00000) != 0x01000000) {
-						prom_printf(insn_h, p, addr, insn);
-						prom_halt();
-					}
-					set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
-					break;
-				case 'a':	/* HALF */
-					/* Has to be sethi i, xx or or %g0, i, xx */
-					if ((insn & 0xc1c00000) != 0x01000000 &&
-					    (insn & 0xc1ffe000) != 0x80102000) {
-						prom_printf(insn_a, p, addr, insn);
-						prom_halt();
-					}
-					if (p[1] & 0x3ff)
-						set_addr(addr, q[1], fmangled,
-							 (insn & 0x3e000000) | 0x80102000 | (p[1] & 0x1fff));
-					else
-						set_addr(addr, q[1], fmangled,
-							 (insn & 0x3e000000) | 0x01000000 | (p[1] >> 10));
-					break;
-				case 'i':	/* INT */
-					if ((insn & 0xc1c00000) == 0x01000000) /* %HI */
-						set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
-					else if ((insn & 0x80002000) == 0x80002000) /* %LO */
-						set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x3ff));
-					else {
-						prom_printf(insn_i, p, addr, insn);
-						prom_halt();
-					}
-					break;
-				}
-				count -= 2;
-				q += 2;
-			}
-		} else
-			p = q + count;
-	}
-#ifdef CONFIG_SMP
-	flush_cacheall = (void (*)(void))BTFIXUPVAL_CALL(local_flush_cache_all);
-#else
-	flush_cacheall = (void (*)(void))BTFIXUPVAL_CALL(flush_cache_all);
-#endif
-	if (!flush_cacheall) {
-		prom_printf(fca_und);
-		prom_halt();
-	}
-	(*flush_cacheall)();
-}
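The heart of the deleted relocator is the norm_f case above: it overwrites the fixup site with a direct SPARC V8 CALL, whose encoding is opcode bits 01 in bits 31:30 over a signed 30-bit word displacement. A minimal user-space sketch of just that encoding (the function name and the example addresses are illustrative, not from the kernel):

#include <stdint.h>
#include <stdio.h>

/* Encode a SPARC V8 CALL from address `from` to address `to`.  This
 * mirrors `0x40000000 | ((p[1] - (unsigned)addr) >> 2)` in norm_f
 * above: the unsigned 32-bit subtraction followed by the shift leaves
 * the top two bits clear, so the OR installs the CALL opcode over a
 * 30-bit word displacement.
 */
static uint32_t sparc_call_insn(uint32_t from, uint32_t to)
{
	return 0x40000000u | ((to - from) >> 2);
}

int main(void)
{
	/* Example: a call site at 0xf0004000 redirected to 0xf0005000. */
	printf("%08x\n", (unsigned)sparc_call_insn(0xf0004000u, 0xf0005000u));
	return 0;
}

The rest of the 'f' case is what made btfixup so hairy: before writing the CALL (or a NOP, or an inlined instruction), it has to inspect the instruction in the delay slot and prove the rewrite cannot clobber %o7, %sp or a RESTORE.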
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index df3155a17991..f46cf6be3370 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -24,29 +24,19 @@
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
-#include <asm/memreg.h>
 #include <asm/openprom.h>
 #include <asm/oplib.h>
 #include <asm/smp.h>
 #include <asm/traps.h>
 #include <asm/uaccess.h>
 
-extern int prom_node_root;
-
 int show_unhandled_signals = 1;
 
 /* At boot time we determine these two values necessary for setting
  * up the segment maps and page table entries (pte's).
  */
 
-int num_segmaps, num_contexts;
-int invalid_segment;
-
-/* various Virtual Address Cache parameters we find at boot time... */
-
-int vac_size, vac_linesize, vac_do_hw_vac_flushes;
-int vac_entries_per_context, vac_entries_per_segment;
-int vac_entries_per_page;
+int num_contexts;
 
 /* Return how much physical memory we have. */
 unsigned long probe_memory(void)
@@ -60,55 +50,36 @@ unsigned long probe_memory(void)
 	return total;
 }
 
-extern void sun4c_complete_all_stores(void);
-
-/* Whee, a level 15 NMI interrupt memory error. Let's have fun... */
-asmlinkage void sparc_lvl15_nmi(struct pt_regs *regs, unsigned long serr,
-				unsigned long svaddr, unsigned long aerr,
-				unsigned long avaddr)
-{
-	sun4c_complete_all_stores();
-	printk("FAULT: NMI received\n");
-	printk("SREGS: Synchronous Error %08lx\n", serr);
-	printk("      Synchronous Vaddr %08lx\n", svaddr);
-	printk("      Asynchronous Error %08lx\n", aerr);
-	printk("      Asynchronous Vaddr %08lx\n", avaddr);
-	if (sun4c_memerr_reg)
-		printk("     Memory Parity Error %08lx\n", *sun4c_memerr_reg);
-	printk("REGISTER DUMP:\n");
-	show_regs(regs);
-	prom_halt();
-}
-
 static void unhandled_fault(unsigned long, struct task_struct *,
 		struct pt_regs *) __attribute__ ((noreturn));
 
-static void unhandled_fault(unsigned long address, struct task_struct *tsk,
-		struct pt_regs *regs)
+static void __noreturn unhandled_fault(unsigned long address,
+				       struct task_struct *tsk,
+				       struct pt_regs *regs)
 {
-	if((unsigned long) address < PAGE_SIZE) {
+	if ((unsigned long) address < PAGE_SIZE) {
 		printk(KERN_ALERT
 		    "Unable to handle kernel NULL pointer dereference\n");
 	} else {
-		printk(KERN_ALERT "Unable to handle kernel paging request "
-		       "at virtual address %08lx\n", address);
+		printk(KERN_ALERT "Unable to handle kernel paging request at virtual address %08lx\n",
+		       address);
 	}
 	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
 		(tsk->mm ? tsk->mm->context : tsk->active_mm->context));
 	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
 		(tsk->mm ? (unsigned long) tsk->mm->pgd :
 		    (unsigned long) tsk->active_mm->pgd));
 	die_if_kernel("Oops", regs);
 }
 
 asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
 			    unsigned long address)
 {
 	struct pt_regs regs;
 	unsigned long g2;
 	unsigned int insn;
 	int i;
 
 	i = search_extables_range(ret_pc, &g2);
 	switch (i) {
 	case 3:
@@ -128,14 +99,14 @@ asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
 		/* for _from_ macros */
 		insn = *((unsigned int *) pc);
 		if (!((insn >> 21) & 1) || ((insn>>19)&0x3f) == 15)
 			return 2;
 		break;
 
 	default:
 		break;
 	}
 
-	memset(&regs, 0, sizeof (regs));
+	memset(&regs, 0, sizeof(regs));
 	regs.pc = pc;
 	regs.npc = pc + 4;
 	__asm__ __volatile__(
@@ -198,11 +169,10 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
 	if (text_fault)
 		return regs->pc;
 
-	if (regs->psr & PSR_PS) {
+	if (regs->psr & PSR_PS)
 		insn = *(unsigned int *) regs->pc;
-	} else {
+	else
 		__get_user(insn, (unsigned int *) regs->pc);
-	}
 
 	return safe_compute_effective_address(regs, insn);
 }
@@ -228,7 +198,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 	unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
 			      (write ? FAULT_FLAG_WRITE : 0));
 
-	if(text_fault)
+	if (text_fault)
 		address = regs->pc;
 
 	/*
@@ -241,36 +211,32 @@
 	 * nothing more.
 	 */
 	code = SEGV_MAPERR;
-	if (!ARCH_SUN4C && address >= TASK_SIZE)
+	if (address >= TASK_SIZE)
 		goto vmalloc_fault;
 
 	/*
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
 	 */
 	if (in_atomic() || !mm)
 		goto no_context;
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
 retry:
 	down_read(&mm->mmap_sem);
 
-	/*
-	 * The kernel referencing a bad kernel pointer can lock up
-	 * a sun4c machine completely, so we must attempt recovery.
-	 */
-	if(!from_user && address >= PAGE_OFFSET)
+	if (!from_user && address >= PAGE_OFFSET)
 		goto bad_area;
 
 	vma = find_vma(mm, address);
-	if(!vma)
+	if (!vma)
 		goto bad_area;
-	if(vma->vm_start <= address)
+	if (vma->vm_start <= address)
 		goto good_area;
-	if(!(vma->vm_flags & VM_GROWSDOWN))
+	if (!(vma->vm_flags & VM_GROWSDOWN))
 		goto bad_area;
-	if(expand_stack(vma, address))
+	if (expand_stack(vma, address))
 		goto bad_area;
 	/*
 	 * Ok, we have a good vm_area for this memory access, so
@@ -278,12 +244,12 @@ retry:
 	 */
 good_area:
 	code = SEGV_ACCERR;
-	if(write) {
-		if(!(vma->vm_flags & VM_WRITE))
+	if (write) {
+		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
 	} else {
 		/* Allow reads even for write-only mappings */
-		if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
+		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
 			goto bad_area;
 	}
 
@@ -349,14 +315,16 @@ no_context:
 	g2 = regs->u_regs[UREG_G2];
 	if (!from_user) {
 		fixup = search_extables_range(regs->pc, &g2);
-		if (fixup > 10) { /* Values below are reserved for other things */
+		/* Values below 10 are reserved for other things */
+		if (fixup > 10) {
 			extern const unsigned __memset_start[];
 			extern const unsigned __memset_end[];
 			extern const unsigned __csum_partial_copy_start[];
 			extern const unsigned __csum_partial_copy_end[];
 
 #ifdef DEBUG_EXCEPTIONS
-			printk("Exception: PC<%08lx> faddr<%08lx>\n", regs->pc, address);
+			printk("Exception: PC<%08lx> faddr<%08lx>\n",
+			       regs->pc, address);
 			printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
 				regs->pc, fixup, g2);
 #endif
@@ -364,7 +332,7 @@ no_context:
 			    regs->pc < (unsigned long)__memset_end) ||
 			    (regs->pc >= (unsigned long)__csum_partial_copy_start &&
 			    regs->pc < (unsigned long)__csum_partial_copy_end)) {
-			    regs->u_regs[UREG_I4] = address;
-			    regs->u_regs[UREG_I5] = regs->pc;
+				regs->u_regs[UREG_I4] = address;
+				regs->u_regs[UREG_I5] = regs->pc;
 			}
 			regs->u_regs[UREG_G2] = g2;
@@ -373,8 +341,8 @@ no_context:
 			return;
 		}
 	}
 
-	unhandled_fault (address, tsk, regs);
+	unhandled_fault(address, tsk, regs);
 	do_exit(SIGKILL);
 
 /*
@@ -420,97 +388,12 @@ vmalloc_fault:
 
 		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
 			goto bad_area_nosemaphore;
+
 		*pmd = *pmd_k;
 		return;
 	}
 }
 
-asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write,
-			       unsigned long address)
-{
-	extern void sun4c_update_mmu_cache(struct vm_area_struct *,
-					   unsigned long,pte_t *);
-	extern pte_t *sun4c_pte_offset_kernel(pmd_t *,unsigned long);
-	struct task_struct *tsk = current;
-	struct mm_struct *mm = tsk->mm;
-	pgd_t *pgdp;
-	pte_t *ptep;
-
-	if (text_fault) {
-		address = regs->pc;
-	} else if (!write &&
-		   !(regs->psr & PSR_PS)) {
-		unsigned int insn, __user *ip;
-
-		ip = (unsigned int __user *)regs->pc;
-		if (!get_user(insn, ip)) {
-			if ((insn & 0xc1680000) == 0xc0680000)
-				write = 1;
-		}
-	}
-
-	if (!mm) {
-		/* We are oopsing. */
-		do_sparc_fault(regs, text_fault, write, address);
-		BUG();	/* P3 Oops already, you bitch */
-	}
-
-	pgdp = pgd_offset(mm, address);
-	ptep = sun4c_pte_offset_kernel((pmd_t *) pgdp, address);
-
-	if (pgd_val(*pgdp)) {
-		if (write) {
-			if ((pte_val(*ptep) & (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT))
-				   == (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT)) {
-				unsigned long flags;
-
-				*ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
-					      _SUN4C_PAGE_MODIFIED |
-					      _SUN4C_PAGE_VALID |
-					      _SUN4C_PAGE_DIRTY);
-
-				local_irq_save(flags);
-				if (sun4c_get_segmap(address) != invalid_segment) {
-					sun4c_put_pte(address, pte_val(*ptep));
-					local_irq_restore(flags);
-					return;
-				}
-				local_irq_restore(flags);
-			}
-		} else {
-			if ((pte_val(*ptep) & (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT))
-				   == (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT)) {
-				unsigned long flags;
-
-				*ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
-					      _SUN4C_PAGE_VALID);
-
-				local_irq_save(flags);
-				if (sun4c_get_segmap(address) != invalid_segment) {
-					sun4c_put_pte(address, pte_val(*ptep));
-					local_irq_restore(flags);
-					return;
-				}
-				local_irq_restore(flags);
-			}
-		}
-	}
-
-	/* This conditional is 'interesting'. */
-	if (pgd_val(*pgdp) && !(write && !(pte_val(*ptep) & _SUN4C_PAGE_WRITE))
-	    && (pte_val(*ptep) & _SUN4C_PAGE_VALID))
-		/* Note: It is safe to not grab the MMAP semaphore here because
-		 *       we know that update_mmu_cache() will not sleep for
-		 *       any reason (at least not in the current implementation)
-		 *       and therefore there is no danger of another thread getting
-		 *       on the CPU and doing a shrink_mmap() on this vma.
-		 */
-		sun4c_update_mmu_cache (find_vma(current->mm, address), address,
-					ptep);
-	else
-		do_sparc_fault(regs, text_fault, write, address);
-}
-
 /* This always deals with user addresses. */
 static void force_user_fault(unsigned long address, int write)
 {
@@ -523,21 +406,21 @@ static void force_user_fault(unsigned long address, int write)
 
 	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, address);
-	if(!vma)
+	if (!vma)
 		goto bad_area;
-	if(vma->vm_start <= address)
+	if (vma->vm_start <= address)
 		goto good_area;
-	if(!(vma->vm_flags & VM_GROWSDOWN))
+	if (!(vma->vm_flags & VM_GROWSDOWN))
 		goto bad_area;
-	if(expand_stack(vma, address))
+	if (expand_stack(vma, address))
 		goto bad_area;
 good_area:
 	code = SEGV_ACCERR;
-	if(write) {
-		if(!(vma->vm_flags & VM_WRITE))
+	if (write) {
+		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
 	} else {
-		if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
+		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
 			goto bad_area;
 	}
 	switch (handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) {
@@ -568,7 +451,7 @@ void window_overflow_fault(void)
 	unsigned long sp;
 
 	sp = current_thread_info()->rwbuf_stkptrs[0];
-	if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
+	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
 		force_user_fault(sp + 0x38, 1);
 	force_user_fault(sp, 1);
 
@@ -577,7 +460,7 @@ void window_overflow_fault(void)
 
 void window_underflow_fault(unsigned long sp)
 {
-	if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
+	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
 		force_user_fault(sp + 0x38, 0);
 	force_user_fault(sp, 0);
 
@@ -589,7 +472,7 @@ void window_ret_fault(struct pt_regs *regs)
 	unsigned long sp;
 
 	sp = regs->u_regs[UREG_FP];
-	if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
+	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
 		force_user_fault(sp + 0x38, 0);
 	force_user_fault(sp, 0);
 
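The three window-fault handlers above share one test: if the base of the register-window save area at sp and its highest touched word at sp + 0x38 land on different pages, both pages must be faulted in before the trap handler can spill or fill the window. A self-contained sketch of that page-crossing check, with PAGE_SIZE pinned to 4K purely for illustration (the kernel takes it from asm/page.h):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u			/* illustration only */
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Same expression as in the handlers: does the save area starting at
 * sp reach offset 0x38 on a different page than sp itself? */
static int window_crosses_page(uint32_t sp)
{
	return ((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK);
}

int main(void)
{
	/* 0x1000 stays on one page; 0x1fd0 + 0x38 spills onto the next. */
	printf("%d %d\n", window_crosses_page(0x1000), window_crosses_page(0x1fd0));
	return 0;
}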
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index c5f9021b1a01..ef5c779ec855 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -27,7 +27,6 @@
 #include <linux/gfp.h>
 
 #include <asm/sections.h>
-#include <asm/vac-ops.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/vaddrs.h>
@@ -45,9 +44,6 @@ EXPORT_SYMBOL(phys_base);
 unsigned long pfn_base;
 EXPORT_SYMBOL(pfn_base);
 
-unsigned long page_kernel;
-EXPORT_SYMBOL(page_kernel);
-
 struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS+1];
 unsigned long sparc_unmapped_base;
 
@@ -287,44 +283,16 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
 }
 
 /*
- * check_pgt_cache
- *
- * This is called at the end of unmapping of VMA (zap_page_range),
- * to rescan the page cache for architecture specific things,
- * presumably something like sun4/sun4c PMEGs. Most architectures
- * define check_pgt_cache empty.
- *
- * We simply copy the 2.4 implementation for now.
- */
-static int pgt_cache_water[2] = { 25, 50 };
-
-void check_pgt_cache(void)
-{
-	do_check_pgt_cache(pgt_cache_water[0], pgt_cache_water[1]);
-}
-
-/*
  * paging_init() sets up the page tables: We call the MMU specific
  * init routine based upon the Sun model type on the Sparc.
  *
  */
-extern void sun4c_paging_init(void);
 extern void srmmu_paging_init(void);
 extern void device_scan(void);
 
-pgprot_t PAGE_SHARED __read_mostly;
-EXPORT_SYMBOL(PAGE_SHARED);
-
 void __init paging_init(void)
 {
 	switch(sparc_cpu_model) {
-	case sun4c:
-	case sun4e:
-	case sun4:
-		sun4c_paging_init();
-		sparc_unmapped_base = 0xe0000000;
-		BTFIXUPSET_SETHI(sparc_unmapped_base, 0xe0000000);
-		break;
 	case sparc_leon:
 		leon_init();
 		/* fall through */
@@ -332,7 +300,6 @@ void __init paging_init(void)
 	case sun4d:
 		srmmu_paging_init();
 		sparc_unmapped_base = 0x50000000;
-		BTFIXUPSET_SETHI(sparc_unmapped_base, 0x50000000);
 		break;
 	default:
 		prom_printf("paging_init: Cannot init paging on this Sparc\n");
@@ -341,24 +308,6 @@ void __init paging_init(void)
 		prom_halt();
 	}
 
-	/* Initialize the protection map with non-constant, MMU dependent values. */
-	protection_map[0] = PAGE_NONE;
-	protection_map[1] = PAGE_READONLY;
-	protection_map[2] = PAGE_COPY;
-	protection_map[3] = PAGE_COPY;
-	protection_map[4] = PAGE_READONLY;
-	protection_map[5] = PAGE_READONLY;
-	protection_map[6] = PAGE_COPY;
-	protection_map[7] = PAGE_COPY;
-	protection_map[8] = PAGE_NONE;
-	protection_map[9] = PAGE_READONLY;
-	protection_map[10] = PAGE_SHARED;
-	protection_map[11] = PAGE_SHARED;
-	protection_map[12] = PAGE_READONLY;
-	protection_map[13] = PAGE_READONLY;
-	protection_map[14] = PAGE_SHARED;
-	protection_map[15] = PAGE_SHARED;
-	btfixup();
 	prom_build_devicetree();
 	of_fill_in_cpu_data();
 	device_scan();
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 21faaeea85de..6026fdd1b2ed 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -741,7 +741,6 @@ static void __init find_ramdisk(unsigned long phys_base)
 struct node_mem_mask {
 	unsigned long mask;
 	unsigned long val;
-	unsigned long bootmem_paddr;
 };
 static struct node_mem_mask node_masks[MAX_NUMNODES];
 static int num_node_masks;
@@ -806,12 +805,6 @@ static u64 memblock_nid_range(u64 start, u64 end, int *nid)
 
 	return start;
 }
-#else
-static u64 memblock_nid_range(u64 start, u64 end, int *nid)
-{
-	*nid = 0;
-	return end;
-}
 #endif
 
 /* This must be invoked after performing all of the necessary
@@ -820,10 +813,11 @@ static u64 memblock_nid_range(u64 start, u64 end, int *nid)
  */
 static void __init allocate_node_data(int nid)
 {
-	unsigned long paddr, num_pages, start_pfn, end_pfn;
 	struct pglist_data *p;
-
+	unsigned long start_pfn, end_pfn;
 #ifdef CONFIG_NEED_MULTIPLE_NODES
+	unsigned long paddr;
+
 	paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid);
 	if (!paddr) {
 		prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
@@ -832,7 +826,7 @@ static void __init allocate_node_data(int nid)
 	NODE_DATA(nid) = __va(paddr);
 	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
 
-	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
+	NODE_DATA(nid)->node_id = nid;
 #endif
 
 	p = NODE_DATA(nid);
@@ -840,18 +834,6 @@ static void __init allocate_node_data(int nid)
 	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
 	p->node_start_pfn = start_pfn;
 	p->node_spanned_pages = end_pfn - start_pfn;
-
-	if (p->node_spanned_pages) {
-		num_pages = bootmem_bootmap_pages(p->node_spanned_pages);
-
-		paddr = memblock_alloc_try_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid);
-		if (!paddr) {
-			prom_printf("Cannot allocate bootmap for nid[%d]\n",
-				    nid);
-			prom_halt();
-		}
-		node_masks[nid].bootmem_paddr = paddr;
-	}
 }
 
 static void init_node_masks_nonnuma(void)
@@ -1292,75 +1274,9 @@ static void __init bootmem_init_nonnuma(void)
 	node_set_online(0);
 }
 
-static void __init reserve_range_in_node(int nid, unsigned long start,
-					 unsigned long end)
-{
-	numadbg("    reserve_range_in_node(nid[%d],start[%lx],end[%lx]\n",
-		nid, start, end);
-	while (start < end) {
-		unsigned long this_end;
-		int n;
-
-		this_end = memblock_nid_range(start, end, &n);
-		if (n == nid) {
-			numadbg("      MATCH reserving range [%lx:%lx]\n",
-				start, this_end);
-			reserve_bootmem_node(NODE_DATA(nid), start,
-					     (this_end - start), BOOTMEM_DEFAULT);
-		} else
-			numadbg("      NO MATCH, advancing start to %lx\n",
-				this_end);
-
-		start = this_end;
-	}
-}
-
-static void __init trim_reserved_in_node(int nid)
-{
-	struct memblock_region *reg;
-
-	numadbg("  trim_reserved_in_node(%d)\n", nid);
-
-	for_each_memblock(reserved, reg)
-		reserve_range_in_node(nid, reg->base, reg->base + reg->size);
-}
-
-static void __init bootmem_init_one_node(int nid)
-{
-	struct pglist_data *p;
-
-	numadbg("bootmem_init_one_node(%d)\n", nid);
-
-	p = NODE_DATA(nid);
-
-	if (p->node_spanned_pages) {
-		unsigned long paddr = node_masks[nid].bootmem_paddr;
-		unsigned long end_pfn;
-
-		end_pfn = p->node_start_pfn + p->node_spanned_pages;
-
-		numadbg("  init_bootmem_node(%d, %lx, %lx, %lx)\n",
-			nid, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);
-
-		init_bootmem_node(p, paddr >> PAGE_SHIFT,
-				  p->node_start_pfn, end_pfn);
-
-		numadbg("  free_bootmem_with_active_regions(%d, %lx)\n",
-			nid, end_pfn);
-		free_bootmem_with_active_regions(nid, end_pfn);
-
-		trim_reserved_in_node(nid);
-
-		numadbg("  sparse_memory_present_with_active_regions(%d)\n",
-			nid);
-		sparse_memory_present_with_active_regions(nid);
-	}
-}
-
 static unsigned long __init bootmem_init(unsigned long phys_base)
 {
 	unsigned long end_pfn;
-	int nid;
 
 	end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
 	max_pfn = max_low_pfn = end_pfn;
@@ -1369,11 +1285,12 @@ static unsigned long __init bootmem_init(unsigned long phys_base)
 	if (bootmem_init_numa() < 0)
 		bootmem_init_nonnuma();
 
-	/* XXX cpu notifier XXX */
+	/* Dump memblock with node info. */
+	memblock_dump_all();
 
-	for_each_online_node(nid)
-		bootmem_init_one_node(nid);
+	/* XXX cpu notifier XXX */
 
+	sparse_memory_present_with_active_regions(MAX_NUMNODES);
 	sparse_init();
 
 	return end_pfn;
@@ -1701,6 +1618,7 @@ void __init paging_init(void)
 {
 	unsigned long end_pfn, shift, phys_base;
 	unsigned long real_end, i;
+	int node;
 
 	/* These build time checkes make sure that the dcache_dirty_cpu()
 	 * page->flags usage will work.
@@ -1826,22 +1744,24 @@ void __init paging_init(void)
 #endif
 	}
 
+	/* Setup bootmem... */
+	last_valid_pfn = end_pfn = bootmem_init(phys_base);
+
 	/* Once the OF device tree and MDESC have been setup, we know
 	 * the list of possible cpus. Therefore we can allocate the
 	 * IRQ stacks.
 	 */
 	for_each_possible_cpu(i) {
-		/* XXX Use node local allocations... XXX */
-		softirq_stack[i] = __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
-		hardirq_stack[i] = __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
-	}
+		node = cpu_to_node(i);
 
-	/* Setup bootmem... */
-	last_valid_pfn = end_pfn = bootmem_init(phys_base);
+		softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
+							THREAD_SIZE,
+							THREAD_SIZE, 0);
+		hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
+							THREAD_SIZE,
+							THREAD_SIZE, 0);
+	}
 
-#ifndef CONFIG_NEED_MULTIPLE_NODES
-	max_mapnr = last_valid_pfn;
-#endif
 	kernel_physical_mapping_init();
 
 	{
@@ -1973,6 +1893,7 @@ void __init mem_init(void)
 				free_all_bootmem_node(NODE_DATA(i));
 			}
 		}
+		totalram_pages += free_low_memory_core_early(MAX_NUMNODES);
 	}
 #else
 	totalram_pages = free_all_bootmem();
diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c
index fc58c3e917df..eb99862e9654 100644
--- a/arch/sparc/mm/io-unit.c
+++ b/arch/sparc/mm/io-unit.c
@@ -197,7 +197,7 @@ static void iounit_release_scsi_sgl(struct device *dev, struct scatterlist *sg,
 }
 
 #ifdef CONFIG_SBUS
-static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va, __u32 addr, int len)
+static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va, unsigned long addr, int len)
 {
 	struct iounit_struct *iounit = dev->archdata.iommu;
 	unsigned long page, end;
@@ -242,29 +242,18 @@ static void iounit_unmap_dma_area(struct device *dev, unsigned long addr, int le
 }
 #endif
 
-static char *iounit_lockarea(char *vaddr, unsigned long len)
-{
-/* FIXME: Write this */
-	return vaddr;
-}
-
-static void iounit_unlockarea(char *vaddr, unsigned long len)
-{
-/* FIXME: Write this */
-}
+static const struct sparc32_dma_ops iounit_dma_ops = {
+	.get_scsi_one		= iounit_get_scsi_one,
+	.get_scsi_sgl		= iounit_get_scsi_sgl,
+	.release_scsi_one	= iounit_release_scsi_one,
+	.release_scsi_sgl	= iounit_release_scsi_sgl,
+#ifdef CONFIG_SBUS
+	.map_dma_area		= iounit_map_dma_area,
+	.unmap_dma_area		= iounit_unmap_dma_area,
+#endif
+};
 
 void __init ld_mmu_iounit(void)
 {
-	BTFIXUPSET_CALL(mmu_lockarea, iounit_lockarea, BTFIXUPCALL_RETO0);
-	BTFIXUPSET_CALL(mmu_unlockarea, iounit_unlockarea, BTFIXUPCALL_NOP);
-
-	BTFIXUPSET_CALL(mmu_get_scsi_one, iounit_get_scsi_one, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(mmu_get_scsi_sgl, iounit_get_scsi_sgl, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(mmu_release_scsi_one, iounit_release_scsi_one, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(mmu_release_scsi_sgl, iounit_release_scsi_sgl, BTFIXUPCALL_NORM);
-
-#ifdef CONFIG_SBUS
-	BTFIXUPSET_CALL(mmu_map_dma_area, iounit_map_dma_area, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(mmu_unmap_dma_area, iounit_unmap_dma_area, BTFIXUPCALL_NORM);
-#endif
+	sparc32_dma_ops = &iounit_dma_ops;
 }
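This hunk shows the shape of the whole series: instead of registering each helper through BTFIXUPSET_CALL() so the boot-time patcher can rewrite every call site, the helpers are collected into a const ops structure and selected with a single pointer assignment. A self-contained sketch of the pattern; the struct and function names here are invented stand-ins for the real struct sparc32_dma_ops in the sparc headers:

#include <stdio.h>

/* Hypothetical mirror of the ops-table pattern used above. */
struct dma_ops {
	int (*map_one)(const char *buf, unsigned int len);
	void (*unmap_one)(int handle);
};

static int iounit_map_one(const char *buf, unsigned int len)
{
	printf("io-unit maps %u bytes of %s\n", len, buf);
	return 1;			/* pretend DMA handle */
}

static void iounit_unmap_one(int handle)
{
	printf("io-unit unmaps handle %d\n", handle);
}

static const struct dma_ops iounit_ops = {
	.map_one	= iounit_map_one,
	.unmap_one	= iounit_unmap_one,
};

/* One global pointer replaces a whole family of patched call sites. */
static const struct dma_ops *dma_ops;

int main(void)
{
	dma_ops = &iounit_ops;		/* the one-line job ld_mmu_iounit() now does */
	dma_ops->unmap_one(dma_ops->map_one("buf", 4));
	return 0;
}

Each call now costs an indirect load, which is exactly what btfixup existed to avoid; the series trades that micro-optimization for a single-pass vmlinux link and far simpler code.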
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index 07fc6a65d9b6..a8a58cad9d2b 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -39,8 +39,6 @@
 
 /* srmmu.c */
 extern int viking_mxcc_present;
-BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
-#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
 extern int flush_page_for_dma_global;
 static int viking_flush;
 /* viking.S */
@@ -143,7 +141,6 @@ static int __init iommu_init(void)
 
 subsys_initcall(iommu_init);
 
-/* This begs to be btfixup-ed by srmmu. */
 /* Flush the iotlb entries to ram. */
 /* This could be better if we didn't have to flush whole pages. */
 static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
@@ -216,11 +213,6 @@ static u32 iommu_get_scsi_one(struct device *dev, char *vaddr, unsigned int len)
 	return busa + off;
 }
 
-static __u32 iommu_get_scsi_one_noflush(struct device *dev, char *vaddr, unsigned long len)
-{
-	return iommu_get_scsi_one(dev, vaddr, len);
-}
-
 static __u32 iommu_get_scsi_one_gflush(struct device *dev, char *vaddr, unsigned long len)
 {
 	flush_page_for_dma(0);
@@ -238,19 +230,6 @@ static __u32 iommu_get_scsi_one_pflush(struct device *dev, char *vaddr, unsigned
 	return iommu_get_scsi_one(dev, vaddr, len);
 }
 
-static void iommu_get_scsi_sgl_noflush(struct device *dev, struct scatterlist *sg, int sz)
-{
-	int n;
-
-	while (sz != 0) {
-		--sz;
-		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
-		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
-		sg->dma_length = sg->length;
-		sg = sg_next(sg);
-	}
-}
-
 static void iommu_get_scsi_sgl_gflush(struct device *dev, struct scatterlist *sg, int sz)
 {
 	int n;
@@ -426,40 +405,36 @@ static void iommu_unmap_dma_area(struct device *dev, unsigned long busa, int len
 }
 #endif
 
-static char *iommu_lockarea(char *vaddr, unsigned long len)
-{
-	return vaddr;
-}
+static const struct sparc32_dma_ops iommu_dma_gflush_ops = {
+	.get_scsi_one		= iommu_get_scsi_one_gflush,
+	.get_scsi_sgl		= iommu_get_scsi_sgl_gflush,
+	.release_scsi_one	= iommu_release_scsi_one,
+	.release_scsi_sgl	= iommu_release_scsi_sgl,
+#ifdef CONFIG_SBUS
+	.map_dma_area		= iommu_map_dma_area,
+	.unmap_dma_area		= iommu_unmap_dma_area,
+#endif
+};
 
-static void iommu_unlockarea(char *vaddr, unsigned long len)
-{
-}
+static const struct sparc32_dma_ops iommu_dma_pflush_ops = {
+	.get_scsi_one		= iommu_get_scsi_one_pflush,
+	.get_scsi_sgl		= iommu_get_scsi_sgl_pflush,
+	.release_scsi_one	= iommu_release_scsi_one,
+	.release_scsi_sgl	= iommu_release_scsi_sgl,
+#ifdef CONFIG_SBUS
+	.map_dma_area		= iommu_map_dma_area,
+	.unmap_dma_area		= iommu_unmap_dma_area,
+#endif
+};
 
 void __init ld_mmu_iommu(void)
 {
-	viking_flush = (BTFIXUPVAL_CALL(flush_page_for_dma) == (unsigned long)viking_flush_page);
-	BTFIXUPSET_CALL(mmu_lockarea, iommu_lockarea, BTFIXUPCALL_RETO0);
-	BTFIXUPSET_CALL(mmu_unlockarea, iommu_unlockarea, BTFIXUPCALL_NOP);
-
-	if (!BTFIXUPVAL_CALL(flush_page_for_dma)) {
-		/* IO coherent chip */
-		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_noflush, BTFIXUPCALL_RETO0);
-		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_noflush, BTFIXUPCALL_NORM);
-	} else if (flush_page_for_dma_global) {
+	if (flush_page_for_dma_global) {
 		/* flush_page_for_dma flushes everything, no matter of what page is it */
-		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_gflush, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_gflush, BTFIXUPCALL_NORM);
+		sparc32_dma_ops = &iommu_dma_gflush_ops;
 	} else {
-		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_pflush, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_pflush, BTFIXUPCALL_NORM);
+		sparc32_dma_ops = &iommu_dma_pflush_ops;
 	}
-	BTFIXUPSET_CALL(mmu_release_scsi_one, iommu_release_scsi_one, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(mmu_release_scsi_sgl, iommu_release_scsi_sgl, BTFIXUPCALL_NORM);
-
-#ifdef CONFIG_SBUS
-	BTFIXUPSET_CALL(mmu_map_dma_area, iommu_map_dma_area, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(mmu_unmap_dma_area, iommu_unmap_dma_area, BTFIXUPCALL_NORM);
-#endif
 
 	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
 		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
diff --git a/arch/sparc/mm/leon_mm.c b/arch/sparc/mm/leon_mm.c
index 13c2169822a8..4c67ae6e5023 100644
--- a/arch/sparc/mm/leon_mm.c
+++ b/arch/sparc/mm/leon_mm.c
@@ -15,9 +15,23 @@
 #include <asm/leon.h>
 #include <asm/tlbflush.h>
 
+#include "srmmu.h"
+
 int leon_flush_during_switch = 1;
 int srmmu_swprobe_trace;
 
+static inline unsigned long leon_get_ctable_ptr(void)
+{
+	unsigned int retval;
+
+	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
+			     "=r" (retval) :
+			     "r" (SRMMU_CTXTBL_PTR),
+			     "i" (ASI_LEON_MMUREGS));
+	return (retval & SRMMU_CTX_PMASK) << 4;
+}
+
+
 unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr)
 {
 
@@ -33,10 +47,10 @@ unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr)
 	if (srmmu_swprobe_trace)
 		printk(KERN_INFO "swprobe: trace on\n");
 
-	ctxtbl = srmmu_get_ctable_ptr();
+	ctxtbl = leon_get_ctable_ptr();
 	if (!(ctxtbl)) {
 		if (srmmu_swprobe_trace)
-			printk(KERN_INFO "swprobe: srmmu_get_ctable_ptr returned 0=>0\n");
+			printk(KERN_INFO "swprobe: leon_get_ctable_ptr returned 0=>0\n");
 		return 0;
 	}
 	if (!_pfn_valid(PFN(ctxtbl))) {
@@ -258,3 +272,80 @@ void leon_switch_mm(void)
 	if (leon_flush_during_switch)
 		leon_flush_cache_all();
 }
+
+static void leon_flush_cache_mm(struct mm_struct *mm)
+{
+	leon_flush_cache_all();
+}
+
+static void leon_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
+{
+	leon_flush_pcache_all(vma, page);
+}
+
+static void leon_flush_cache_range(struct vm_area_struct *vma,
+				   unsigned long start,
+				   unsigned long end)
+{
+	leon_flush_cache_all();
+}
+
+static void leon_flush_tlb_mm(struct mm_struct *mm)
+{
+	leon_flush_tlb_all();
+}
+
+static void leon_flush_tlb_page(struct vm_area_struct *vma,
+				unsigned long page)
+{
+	leon_flush_tlb_all();
+}
+
+static void leon_flush_tlb_range(struct vm_area_struct *vma,
+				 unsigned long start,
+				 unsigned long end)
+{
+	leon_flush_tlb_all();
+}
+
+static void leon_flush_page_to_ram(unsigned long page)
+{
+	leon_flush_cache_all();
+}
+
+static void leon_flush_sig_insns(struct mm_struct *mm, unsigned long page)
+{
+	leon_flush_cache_all();
+}
+
+static void leon_flush_page_for_dma(unsigned long page)
+{
+	leon_flush_dcache_all();
+}
+
+void __init poke_leonsparc(void)
+{
+}
+
+static const struct sparc32_cachetlb_ops leon_ops = {
+	.cache_all	= leon_flush_cache_all,
+	.cache_mm	= leon_flush_cache_mm,
+	.cache_page	= leon_flush_cache_page,
+	.cache_range	= leon_flush_cache_range,
+	.tlb_all	= leon_flush_tlb_all,
+	.tlb_mm		= leon_flush_tlb_mm,
+	.tlb_page	= leon_flush_tlb_page,
+	.tlb_range	= leon_flush_tlb_range,
+	.page_to_ram	= leon_flush_page_to_ram,
+	.sig_insns	= leon_flush_sig_insns,
+	.page_for_dma	= leon_flush_page_for_dma,
+};
+
+void __init init_leon(void)
+{
+	srmmu_name = "LEON";
+	sparc32_cachetlb_ops = &leon_ops;
+	poke_srmmu = poke_leonsparc;
+
+	leon_flush_during_switch = leon_flush_needed();
+}
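leon_get_ctable_ptr() above reads the SRMMU context-table pointer register with an alternate-space load; the register keeps the table's physical address in its upper bits, so the code masks off the low control bits and shifts left by four to recover a byte address. That mask-and-shift arithmetic can be checked in isolation; CTX_PMASK below is an assumed stand-in for SRMMU_CTX_PMASK, whose authoritative value lives in the sparc headers, and the register contents are invented for the example:

#include <stdint.h>
#include <stdio.h>

#define CTX_PMASK 0xfffffff0u	/* assumption: mirrors SRMMU_CTX_PMASK */

/* Recover the context-table byte address from the raw register value,
 * as `(retval & SRMMU_CTX_PMASK) << 4` does above; a 64-bit return
 * type keeps the shifted result from truncating in this sketch. */
static uint64_t ctable_paddr(uint32_t reg)
{
	return (uint64_t)(reg & CTX_PMASK) << 4;
}

int main(void)
{
	uint32_t reg = 0x04000010u;	/* example register contents */
	printf("%llx\n", (unsigned long long)ctable_paddr(reg));
	return 0;
}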
diff --git a/arch/sparc/mm/loadmmu.c b/arch/sparc/mm/loadmmu.c
deleted file mode 100644
index c5bf2a6c3858..000000000000
--- a/arch/sparc/mm/loadmmu.c
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * loadmmu.c:  This code loads up all the mm function pointers once the
- *             machine type has been determined.  It also sets the static
- *             mmu values such as PAGE_NONE, etc.
- *
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- */
-
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/mmu_context.h>
-#include <asm/oplib.h>
-
-struct ctx_list *ctx_list_pool;
-struct ctx_list ctx_free;
-struct ctx_list ctx_used;
-
-extern void ld_mmu_sun4c(void);
-extern void ld_mmu_srmmu(void);
-
-void __init load_mmu(void)
-{
-	switch(sparc_cpu_model) {
-	case sun4c:
-	case sun4:
-		ld_mmu_sun4c();
-		break;
-	case sun4m:
-	case sun4d:
-	case sparc_leon:
-		ld_mmu_srmmu();
-		break;
-	default:
-		prom_printf("load_mmu: %d unsupported\n", (int)sparc_cpu_model);
-		prom_halt();
-	}
-	btfixup();
-}
diff --git a/arch/sparc/mm/nosun4c.c b/arch/sparc/mm/nosun4c.c
deleted file mode 100644
index 4e62c27147c4..000000000000
--- a/arch/sparc/mm/nosun4c.c
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * nosun4c.c: This file is a bunch of dummies for SMP compiles,
- *            so that it does not need sun4c and avoid ifdefs.
- *
- * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- */
-
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <asm/pgtable.h>
-
-static char shouldnothappen[] __initdata = "32bit SMP kernel only supports sun4m and sun4d\n";
-
-/* Dummies */
-struct sun4c_mmu_ring {
-	unsigned long xxx1[3];
-	unsigned char xxx2[2];
-	int xxx3;
-};
-struct sun4c_mmu_ring sun4c_kernel_ring;
-struct sun4c_mmu_ring sun4c_kfree_ring;
-unsigned long sun4c_kernel_faults;
-unsigned long *sun4c_memerr_reg;
-
-static void __init should_not_happen(void)
-{
-	prom_printf(shouldnothappen);
-	prom_halt();
-}
-
-unsigned long __init sun4c_paging_init(unsigned long start_mem, unsigned long end_mem)
-{
-	should_not_happen();
-	return 0;
-}
-
-void __init ld_mmu_sun4c(void)
-{
-	should_not_happen();
-}
-
-void sun4c_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_type, int rdonly)
-{
-}
-
-void sun4c_unmapioaddr(unsigned long virt_addr)
-{
-}
-
-void sun4c_complete_all_stores(void)
-{
-}
-
-pte_t *sun4c_pte_offset(pmd_t * dir, unsigned long address)
-{
-	return NULL;
-}
-
-pte_t *sun4c_pte_offset_kernel(pmd_t *dir, unsigned long address)
-{
-	return NULL;
-}
-
-void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
-{
-}
-
-void __init sun4c_probe_vac(void)
-{
-	should_not_happen();
-}
-
-void __init sun4c_probe_memerr_reg(void)
-{
-	should_not_happen();
-}
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index cbef74e793b8..8e97e0305b01 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -48,39 +48,37 @@
48#include <asm/turbosparc.h> 48#include <asm/turbosparc.h>
49#include <asm/leon.h> 49#include <asm/leon.h>
50 50
51#include <asm/btfixup.h> 51#include "srmmu.h"
52 52
53enum mbus_module srmmu_modtype; 53enum mbus_module srmmu_modtype;
54static unsigned int hwbug_bitmask; 54static unsigned int hwbug_bitmask;
55int vac_cache_size; 55int vac_cache_size;
56int vac_line_size; 56int vac_line_size;
57 57
58struct ctx_list *ctx_list_pool;
59struct ctx_list ctx_free;
60struct ctx_list ctx_used;
61
58extern struct resource sparc_iomap; 62extern struct resource sparc_iomap;
59 63
60extern unsigned long last_valid_pfn; 64extern unsigned long last_valid_pfn;
61 65
62extern unsigned long page_kernel;
63
64static pgd_t *srmmu_swapper_pg_dir; 66static pgd_t *srmmu_swapper_pg_dir;
65 67
68const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops;
69
66#ifdef CONFIG_SMP 70#ifdef CONFIG_SMP
71const struct sparc32_cachetlb_ops *local_ops;
72
67#define FLUSH_BEGIN(mm) 73#define FLUSH_BEGIN(mm)
68#define FLUSH_END 74#define FLUSH_END
69#else 75#else
70#define FLUSH_BEGIN(mm) if((mm)->context != NO_CONTEXT) { 76#define FLUSH_BEGIN(mm) if ((mm)->context != NO_CONTEXT) {
71#define FLUSH_END } 77#define FLUSH_END }
72#endif 78#endif
73 79
74BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
75#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
76
77int flush_page_for_dma_global = 1; 80int flush_page_for_dma_global = 1;
78 81
79#ifdef CONFIG_SMP
80BTFIXUPDEF_CALL(void, local_flush_page_for_dma, unsigned long)
81#define local_flush_page_for_dma(page) BTFIXUP_CALL(local_flush_page_for_dma)(page)
82#endif
83
84char *srmmu_name; 82char *srmmu_name;
85 83
86ctxd_t *srmmu_ctx_table_phys; 84ctxd_t *srmmu_ctx_table_phys;
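For readers following the FLUSH_BEGIN/FLUSH_END change above: on UP builds the pair wraps a flush body in a context-validity check, while SMP builds define both as empty so the flush always runs. A sketch of what a flush routine looks like after UP preprocessing (illustrative body only):

/* UP expansion of a routine written as FLUSH_BEGIN(mm) ... FLUSH_END */
void example_flush_tlb_mm(struct mm_struct *mm)
{
        if (mm->context != NO_CONTEXT) {        /* FLUSH_BEGIN(mm) */
                /* ... per-mm TLB flush body ... */
        }                                       /* FLUSH_END */
}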
@@ -91,28 +89,6 @@ static DEFINE_SPINLOCK(srmmu_context_spinlock);
91 89
92static int is_hypersparc; 90static int is_hypersparc;
93 91
94/*
95 * In general all page table modifications should use the V8 atomic
 96 * swap instruction. This ensures the mmu and the cpu are in sync
97 * with respect to ref/mod bits in the page tables.
98 */
99static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
100{
101 __asm__ __volatile__("swap [%2], %0" : "=&r" (value) : "0" (value), "r" (addr));
102 return value;
103}
104
105static inline void srmmu_set_pte(pte_t *ptep, pte_t pteval)
106{
107 srmmu_swap((unsigned long *)ptep, pte_val(pteval));
108}
109
110/* The very generic SRMMU page table operations. */
111static inline int srmmu_device_memory(unsigned long x)
112{
113 return ((x & 0xF0000000) != 0);
114}
115
116static int srmmu_cache_pagetables; 92static int srmmu_cache_pagetables;
117 93
118/* these will be initialized in srmmu_nocache_calcsize() */ 94/* these will be initialized in srmmu_nocache_calcsize() */
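The srmmu_swap()/srmmu_set_pte() pair deleted above encodes the rule the comment states: PTE updates go through an atomic exchange because the MMU itself writes referenced/modified bits into PTEs. With a separate load and store, a hardware-set bit can land between the two and be silently overwritten. A standalone sketch of the distinction, using C11 atomics as a portable stand-in for the V8 swap instruction:

#include <stdatomic.h>

typedef unsigned long pteval_t;

/* Safe shape: one indivisible exchange, nothing can interleave. */
static inline pteval_t pte_exchange(_Atomic pteval_t *ptep, pteval_t val)
{
        return atomic_exchange(ptep, val);
}

/* Racy shape the comment warns against: an MMU update to *ptep
 * arriving before this store is lost. */
static inline void pte_store_racy(pteval_t *ptep, pteval_t val)
{
        *ptep = val;
}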
@@ -129,145 +105,39 @@ void *srmmu_nocache_pool;
129void *srmmu_nocache_bitmap; 105void *srmmu_nocache_bitmap;
130static struct bit_map srmmu_nocache_map; 106static struct bit_map srmmu_nocache_map;
131 107
132static unsigned long srmmu_pte_pfn(pte_t pte)
133{
134 if (srmmu_device_memory(pte_val(pte))) {
135 /* Just return something that will cause
136 * pfn_valid() to return false. This makes
 137 * copy_one_pte() just copy the
 138 * PTE over directly.
139 */
140 return ~0UL;
141 }
142 return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4);
143}
144
145static struct page *srmmu_pmd_page(pmd_t pmd)
146{
147
148 if (srmmu_device_memory(pmd_val(pmd)))
149 BUG();
150 return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
151}
152
153static inline unsigned long srmmu_pgd_page(pgd_t pgd)
154{ return srmmu_device_memory(pgd_val(pgd))?~0:(unsigned long)__nocache_va((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }
155
156
157static inline int srmmu_pte_none(pte_t pte)
158{ return !(pte_val(pte) & 0xFFFFFFF); }
159
160static inline int srmmu_pte_present(pte_t pte)
161{ return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE); }
162
163static inline void srmmu_pte_clear(pte_t *ptep)
164{ srmmu_set_pte(ptep, __pte(0)); }
165
166static inline int srmmu_pmd_none(pmd_t pmd) 108static inline int srmmu_pmd_none(pmd_t pmd)
167{ return !(pmd_val(pmd) & 0xFFFFFFF); } 109{ return !(pmd_val(pmd) & 0xFFFFFFF); }
168 110
169static inline int srmmu_pmd_bad(pmd_t pmd)
170{ return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }
171
172static inline int srmmu_pmd_present(pmd_t pmd)
173{ return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }
174
175static inline void srmmu_pmd_clear(pmd_t *pmdp) {
176 int i;
177 for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++)
178 srmmu_set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
179}
180
181static inline int srmmu_pgd_none(pgd_t pgd)
182{ return !(pgd_val(pgd) & 0xFFFFFFF); }
183
184static inline int srmmu_pgd_bad(pgd_t pgd)
185{ return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }
186
187static inline int srmmu_pgd_present(pgd_t pgd)
188{ return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }
189
190static inline void srmmu_pgd_clear(pgd_t * pgdp)
191{ srmmu_set_pte((pte_t *)pgdp, __pte(0)); }
192
193static inline pte_t srmmu_pte_wrprotect(pte_t pte)
194{ return __pte(pte_val(pte) & ~SRMMU_WRITE);}
195
196static inline pte_t srmmu_pte_mkclean(pte_t pte)
197{ return __pte(pte_val(pte) & ~SRMMU_DIRTY);}
198
199static inline pte_t srmmu_pte_mkold(pte_t pte)
200{ return __pte(pte_val(pte) & ~SRMMU_REF);}
201
202static inline pte_t srmmu_pte_mkwrite(pte_t pte)
203{ return __pte(pte_val(pte) | SRMMU_WRITE);}
204
205static inline pte_t srmmu_pte_mkdirty(pte_t pte)
206{ return __pte(pte_val(pte) | SRMMU_DIRTY);}
207
208static inline pte_t srmmu_pte_mkyoung(pte_t pte)
209{ return __pte(pte_val(pte) | SRMMU_REF);}
210
211/*
212 * Conversion functions: convert a page and protection to a page entry,
213 * and a page entry and page directory to the page they refer to.
214 */
215static pte_t srmmu_mk_pte(struct page *page, pgprot_t pgprot)
216{ return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot)); }
217
218static pte_t srmmu_mk_pte_phys(unsigned long page, pgprot_t pgprot)
219{ return __pte(((page) >> 4) | pgprot_val(pgprot)); }
220
221static pte_t srmmu_mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
222{ return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot)); }
223
224/* XXX should we hyper_flush_whole_icache here - Anton */ 111/* XXX should we hyper_flush_whole_icache here - Anton */
225static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp) 112static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
226{ srmmu_set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); } 113{ set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); }
227 114
228static inline void srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp) 115void pmd_set(pmd_t *pmdp, pte_t *ptep)
229{ srmmu_set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pmdp) >> 4))); }
230
231static void srmmu_pmd_set(pmd_t *pmdp, pte_t *ptep)
232{ 116{
233 unsigned long ptp; /* Physical address, shifted right by 4 */ 117 unsigned long ptp; /* Physical address, shifted right by 4 */
234 int i; 118 int i;
235 119
236 ptp = __nocache_pa((unsigned long) ptep) >> 4; 120 ptp = __nocache_pa((unsigned long) ptep) >> 4;
237 for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) { 121 for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
238 srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp); 122 set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
239 ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4); 123 ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
240 } 124 }
241} 125}
242 126
243static void srmmu_pmd_populate(pmd_t *pmdp, struct page *ptep) 127void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
244{ 128{
245 unsigned long ptp; /* Physical address, shifted right by 4 */ 129 unsigned long ptp; /* Physical address, shifted right by 4 */
246 int i; 130 int i;
247 131
248 ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4); /* watch for overflow */ 132 ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4); /* watch for overflow */
249 for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) { 133 for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
250 srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp); 134 set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
251 ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4); 135 ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
252 } 136 }
253} 137}
254 138
255static inline pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
256{ return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot)); }
257
258/* to find an entry in a top-level page table... */
259static inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
260{ return mm->pgd + (address >> SRMMU_PGDIR_SHIFT); }
261
262/* Find an entry in the second-level page table.. */
263static inline pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address)
264{
265 return (pmd_t *) srmmu_pgd_page(*dir) +
266 ((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
267}
268
269/* Find an entry in the third-level page table.. */ 139/* Find an entry in the third-level page table.. */
270static inline pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address) 140pte_t *pte_offset_kernel(pmd_t * dir, unsigned long address)
271{ 141{
272 void *pte; 142 void *pte;
273 143
@@ -276,23 +146,6 @@ static inline pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address)
276 ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)); 146 ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
277} 147}
278 148
279static unsigned long srmmu_swp_type(swp_entry_t entry)
280{
281 return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
282}
283
284static unsigned long srmmu_swp_offset(swp_entry_t entry)
285{
286 return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
287}
288
289static swp_entry_t srmmu_swp_entry(unsigned long type, unsigned long offset)
290{
291 return (swp_entry_t) {
292 (type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
293 | (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
294}
295
296/* 149/*
297 * size: bytes to allocate in the nocache area. 150 * size: bytes to allocate in the nocache area.
298 * align: bytes, number to align at. 151 * align: bytes, number to align at.
@@ -325,7 +178,7 @@ static unsigned long __srmmu_get_nocache(int size, int align)
325 return (SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT)); 178 return (SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT));
326} 179}
327 180
328static unsigned long srmmu_get_nocache(int size, int align) 181unsigned long srmmu_get_nocache(int size, int align)
329{ 182{
330 unsigned long tmp; 183 unsigned long tmp;
331 184
@@ -337,7 +190,7 @@ static unsigned long srmmu_get_nocache(int size, int align)
337 return tmp; 190 return tmp;
338} 191}
339 192
340static void srmmu_free_nocache(unsigned long vaddr, int size) 193void srmmu_free_nocache(unsigned long vaddr, int size)
341{ 194{
342 int offset; 195 int offset;
343 196
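The allocator pair above is now exported rather than static; its contract is simple: free with exactly the size that was allocated, and for page-table nodes pass the same value for size and alignment so a table never straddles an alignment boundary. A sketch mirroring the deleted srmmu_pmd_alloc_one()/srmmu_pmd_free() callers:

/* sketch: round-trip one pmd-sized node through the nocache pool */
static void nocache_pmd_roundtrip(void)
{
        pmd_t *pmd = (pmd_t *)srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE,
                                                SRMMU_PMD_TABLE_SIZE);
        if (!pmd)
                return;
        /* ... install with pgd_set(), use ... */
        srmmu_free_nocache((unsigned long)pmd, SRMMU_PMD_TABLE_SIZE);
}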
@@ -429,15 +282,15 @@ static void __init srmmu_nocache_init(void)
429 282
430 while (vaddr < srmmu_nocache_end) { 283 while (vaddr < srmmu_nocache_end) {
431 pgd = pgd_offset_k(vaddr); 284 pgd = pgd_offset_k(vaddr);
432 pmd = srmmu_pmd_offset(__nocache_fix(pgd), vaddr); 285 pmd = pmd_offset(__nocache_fix(pgd), vaddr);
433 pte = srmmu_pte_offset(__nocache_fix(pmd), vaddr); 286 pte = pte_offset_kernel(__nocache_fix(pmd), vaddr);
434 287
435 pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV); 288 pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV);
436 289
437 if (srmmu_cache_pagetables) 290 if (srmmu_cache_pagetables)
438 pteval |= SRMMU_CACHE; 291 pteval |= SRMMU_CACHE;
439 292
440 srmmu_set_pte(__nocache_fix(pte), __pte(pteval)); 293 set_pte(__nocache_fix(pte), __pte(pteval));
441 294
442 vaddr += PAGE_SIZE; 295 vaddr += PAGE_SIZE;
443 paddr += PAGE_SIZE; 296 paddr += PAGE_SIZE;
@@ -447,7 +300,7 @@ static void __init srmmu_nocache_init(void)
447 flush_tlb_all(); 300 flush_tlb_all();
448} 301}
449 302
450static inline pgd_t *srmmu_get_pgd_fast(void) 303pgd_t *get_pgd_fast(void)
451{ 304{
452 pgd_t *pgd = NULL; 305 pgd_t *pgd = NULL;
453 306
@@ -462,21 +315,6 @@ static inline pgd_t *srmmu_get_pgd_fast(void)
462 return pgd; 315 return pgd;
463} 316}
464 317
465static void srmmu_free_pgd_fast(pgd_t *pgd)
466{
467 srmmu_free_nocache((unsigned long)pgd, SRMMU_PGD_TABLE_SIZE);
468}
469
470static pmd_t *srmmu_pmd_alloc_one(struct mm_struct *mm, unsigned long address)
471{
472 return (pmd_t *)srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
473}
474
475static void srmmu_pmd_free(pmd_t * pmd)
476{
477 srmmu_free_nocache((unsigned long)pmd, SRMMU_PMD_TABLE_SIZE);
478}
479
480/* 318/*
481 * Hardware needs alignment to 256 only, but we align to whole page size 319 * Hardware needs alignment to 256 only, but we align to whole page size
482 * to reduce fragmentation problems due to the buddy principle. 320 * to reduce fragmentation problems due to the buddy principle.
@@ -485,31 +323,19 @@ static void srmmu_pmd_free(pmd_t * pmd)
485 * Alignments up to the page size are the same for physical and virtual 323 * Alignments up to the page size are the same for physical and virtual
486 * addresses of the nocache area. 324 * addresses of the nocache area.
487 */ 325 */
488static pte_t * 326pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
489srmmu_pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
490{
491 return (pte_t *)srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
492}
493
494static pgtable_t
495srmmu_pte_alloc_one(struct mm_struct *mm, unsigned long address)
496{ 327{
497 unsigned long pte; 328 unsigned long pte;
498 struct page *page; 329 struct page *page;
499 330
500 if ((pte = (unsigned long)srmmu_pte_alloc_one_kernel(mm, address)) == 0) 331 if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0)
501 return NULL; 332 return NULL;
502 page = pfn_to_page( __nocache_pa(pte) >> PAGE_SHIFT ); 333 page = pfn_to_page( __nocache_pa(pte) >> PAGE_SHIFT );
503 pgtable_page_ctor(page); 334 pgtable_page_ctor(page);
504 return page; 335 return page;
505} 336}
506 337
507static void srmmu_free_pte_fast(pte_t *pte) 338void pte_free(struct mm_struct *mm, pgtable_t pte)
508{
509 srmmu_free_nocache((unsigned long)pte, PTE_SIZE);
510}
511
512static void srmmu_pte_free(pgtable_t pte)
513{ 339{
514 unsigned long p; 340 unsigned long p;
515 341
@@ -560,8 +386,8 @@ static inline void free_context(int context)
560} 386}
561 387
562 388
563static void srmmu_switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, 389void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
564 struct task_struct *tsk, int cpu) 390 struct task_struct *tsk)
565{ 391{
566 if(mm->context == NO_CONTEXT) { 392 if(mm->context == NO_CONTEXT) {
567 spin_lock(&srmmu_context_spinlock); 393 spin_lock(&srmmu_context_spinlock);
@@ -590,8 +416,8 @@ static inline void srmmu_mapioaddr(unsigned long physaddr,
590 416
591 physaddr &= PAGE_MASK; 417 physaddr &= PAGE_MASK;
592 pgdp = pgd_offset_k(virt_addr); 418 pgdp = pgd_offset_k(virt_addr);
593 pmdp = srmmu_pmd_offset(pgdp, virt_addr); 419 pmdp = pmd_offset(pgdp, virt_addr);
594 ptep = srmmu_pte_offset(pmdp, virt_addr); 420 ptep = pte_offset_kernel(pmdp, virt_addr);
595 tmp = (physaddr >> 4) | SRMMU_ET_PTE; 421 tmp = (physaddr >> 4) | SRMMU_ET_PTE;
596 422
597 /* 423 /*
@@ -602,11 +428,11 @@ static inline void srmmu_mapioaddr(unsigned long physaddr,
602 tmp |= (bus_type << 28); 428 tmp |= (bus_type << 28);
603 tmp |= SRMMU_PRIV; 429 tmp |= SRMMU_PRIV;
604 __flush_page_to_ram(virt_addr); 430 __flush_page_to_ram(virt_addr);
605 srmmu_set_pte(ptep, __pte(tmp)); 431 set_pte(ptep, __pte(tmp));
606} 432}
607 433
608static void srmmu_mapiorange(unsigned int bus, unsigned long xpa, 434void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
609 unsigned long xva, unsigned int len) 435 unsigned long xva, unsigned int len)
610{ 436{
611 while (len != 0) { 437 while (len != 0) {
612 len -= PAGE_SIZE; 438 len -= PAGE_SIZE;
@@ -624,14 +450,14 @@ static inline void srmmu_unmapioaddr(unsigned long virt_addr)
624 pte_t *ptep; 450 pte_t *ptep;
625 451
626 pgdp = pgd_offset_k(virt_addr); 452 pgdp = pgd_offset_k(virt_addr);
627 pmdp = srmmu_pmd_offset(pgdp, virt_addr); 453 pmdp = pmd_offset(pgdp, virt_addr);
628 ptep = srmmu_pte_offset(pmdp, virt_addr); 454 ptep = pte_offset_kernel(pmdp, virt_addr);
629 455
630 /* No need to flush uncacheable page. */ 456 /* No need to flush uncacheable page. */
631 srmmu_pte_clear(ptep); 457 __pte_clear(ptep);
632} 458}
633 459
634static void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len) 460void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
635{ 461{
636 while (len != 0) { 462 while (len != 0) {
637 len -= PAGE_SIZE; 463 len -= PAGE_SIZE;
@@ -647,10 +473,9 @@ static void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
647 * pool. As a side effect we are putting a little too much pressure 473 * pool. As a side effect we are putting a little too much pressure
648 * on the gfp() subsystem. This setup also makes the logic of the 474 * on the gfp() subsystem. This setup also makes the logic of the
649 * iommu mapping code a lot easier as we can transparently handle 475 * iommu mapping code a lot easier as we can transparently handle
650 * mappings on the kernel stack without any special code as we did 476 * mappings on the kernel stack without any special code.
651 * need on the sun4c.
652 */ 477 */
653static struct thread_info *srmmu_alloc_thread_info_node(int node) 478struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
654{ 479{
655 struct thread_info *ret; 480 struct thread_info *ret;
656 481
@@ -664,7 +489,7 @@ static struct thread_info *srmmu_alloc_thread_info_node(int node)
664 return ret; 489 return ret;
665} 490}
666 491
667static void srmmu_free_thread_info(struct thread_info *ti) 492void free_thread_info(struct thread_info *ti)
668{ 493{
669 free_pages((unsigned long)ti, THREAD_INFO_ORDER); 494 free_pages((unsigned long)ti, THREAD_INFO_ORDER);
670} 495}
@@ -683,38 +508,6 @@ extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long st
683extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page); 508extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
684extern void tsunami_setup_blockops(void); 509extern void tsunami_setup_blockops(void);
685 510
686/*
687 * Workaround, until we find what's going on with Swift. When low on memory,
 688 * it sometimes loops in fault/handle_mm_fault (incl. flush_tlb_page), finds
 689 * the entry already present in the page tables, and faults again on the same instruction.
690 * I really don't understand it, have checked it and contexts
691 * are right, flush_tlb_all is done as well, and it faults again...
692 * Strange. -jj
693 *
 694 * The following code is dead wood that may become necessary when
695 * we start to make precise page flushes again. --zaitcev
696 */
697static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t *ptep)
698{
699#if 0
700 static unsigned long last;
701 unsigned int val;
702 /* unsigned int n; */
703
704 if (address == last) {
705 val = srmmu_hwprobe(address);
706 if (val != 0 && pte_val(*ptep) != val) {
707 printk("swift_update_mmu_cache: "
708 "addr %lx put %08x probed %08x from %p\n",
709 address, pte_val(*ptep), val,
710 __builtin_return_address(0));
711 srmmu_flush_whole_tlb();
712 }
713 }
714 last = address;
715#endif
716}
717
718/* swift.S */ 511/* swift.S */
719extern void swift_flush_cache_all(void); 512extern void swift_flush_cache_all(void);
720extern void swift_flush_cache_mm(struct mm_struct *mm); 513extern void swift_flush_cache_mm(struct mm_struct *mm);
@@ -767,244 +560,6 @@ void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
767 * with respect to cache coherency. 560 * with respect to cache coherency.
768 */ 561 */
769 562
770/* Cypress flushes. */
771static void cypress_flush_cache_all(void)
772{
773 volatile unsigned long cypress_sucks;
774 unsigned long faddr, tagval;
775
776 flush_user_windows();
777 for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
778 __asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
779 "=r" (tagval) :
780 "r" (faddr), "r" (0x40000),
781 "i" (ASI_M_DATAC_TAG));
782
783 /* If modified and valid, kick it. */
784 if((tagval & 0x60) == 0x60)
785 cypress_sucks = *(unsigned long *)(0xf0020000 + faddr);
786 }
787}
788
789static void cypress_flush_cache_mm(struct mm_struct *mm)
790{
791 register unsigned long a, b, c, d, e, f, g;
792 unsigned long flags, faddr;
793 int octx;
794
795 FLUSH_BEGIN(mm)
796 flush_user_windows();
797 local_irq_save(flags);
798 octx = srmmu_get_context();
799 srmmu_set_context(mm->context);
800 a = 0x20; b = 0x40; c = 0x60;
801 d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
802
803 faddr = (0x10000 - 0x100);
804 goto inside;
805 do {
806 faddr -= 0x100;
807 inside:
808 __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
809 "sta %%g0, [%0 + %2] %1\n\t"
810 "sta %%g0, [%0 + %3] %1\n\t"
811 "sta %%g0, [%0 + %4] %1\n\t"
812 "sta %%g0, [%0 + %5] %1\n\t"
813 "sta %%g0, [%0 + %6] %1\n\t"
814 "sta %%g0, [%0 + %7] %1\n\t"
815 "sta %%g0, [%0 + %8] %1\n\t" : :
816 "r" (faddr), "i" (ASI_M_FLUSH_CTX),
817 "r" (a), "r" (b), "r" (c), "r" (d),
818 "r" (e), "r" (f), "r" (g));
819 } while(faddr);
820 srmmu_set_context(octx);
821 local_irq_restore(flags);
822 FLUSH_END
823}
824
825static void cypress_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
826{
827 struct mm_struct *mm = vma->vm_mm;
828 register unsigned long a, b, c, d, e, f, g;
829 unsigned long flags, faddr;
830 int octx;
831
832 FLUSH_BEGIN(mm)
833 flush_user_windows();
834 local_irq_save(flags);
835 octx = srmmu_get_context();
836 srmmu_set_context(mm->context);
837 a = 0x20; b = 0x40; c = 0x60;
838 d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
839
840 start &= SRMMU_REAL_PMD_MASK;
841 while(start < end) {
842 faddr = (start + (0x10000 - 0x100));
843 goto inside;
844 do {
845 faddr -= 0x100;
846 inside:
847 __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
848 "sta %%g0, [%0 + %2] %1\n\t"
849 "sta %%g0, [%0 + %3] %1\n\t"
850 "sta %%g0, [%0 + %4] %1\n\t"
851 "sta %%g0, [%0 + %5] %1\n\t"
852 "sta %%g0, [%0 + %6] %1\n\t"
853 "sta %%g0, [%0 + %7] %1\n\t"
854 "sta %%g0, [%0 + %8] %1\n\t" : :
855 "r" (faddr),
856 "i" (ASI_M_FLUSH_SEG),
857 "r" (a), "r" (b), "r" (c), "r" (d),
858 "r" (e), "r" (f), "r" (g));
859 } while (faddr != start);
860 start += SRMMU_REAL_PMD_SIZE;
861 }
862 srmmu_set_context(octx);
863 local_irq_restore(flags);
864 FLUSH_END
865}
866
867static void cypress_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
868{
869 register unsigned long a, b, c, d, e, f, g;
870 struct mm_struct *mm = vma->vm_mm;
871 unsigned long flags, line;
872 int octx;
873
874 FLUSH_BEGIN(mm)
875 flush_user_windows();
876 local_irq_save(flags);
877 octx = srmmu_get_context();
878 srmmu_set_context(mm->context);
879 a = 0x20; b = 0x40; c = 0x60;
880 d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
881
882 page &= PAGE_MASK;
883 line = (page + PAGE_SIZE) - 0x100;
884 goto inside;
885 do {
886 line -= 0x100;
887 inside:
888 __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
889 "sta %%g0, [%0 + %2] %1\n\t"
890 "sta %%g0, [%0 + %3] %1\n\t"
891 "sta %%g0, [%0 + %4] %1\n\t"
892 "sta %%g0, [%0 + %5] %1\n\t"
893 "sta %%g0, [%0 + %6] %1\n\t"
894 "sta %%g0, [%0 + %7] %1\n\t"
895 "sta %%g0, [%0 + %8] %1\n\t" : :
896 "r" (line),
897 "i" (ASI_M_FLUSH_PAGE),
898 "r" (a), "r" (b), "r" (c), "r" (d),
899 "r" (e), "r" (f), "r" (g));
900 } while(line != page);
901 srmmu_set_context(octx);
902 local_irq_restore(flags);
903 FLUSH_END
904}
905
906/* Cypress is copy-back, at least that is how we configure it. */
907static void cypress_flush_page_to_ram(unsigned long page)
908{
909 register unsigned long a, b, c, d, e, f, g;
910 unsigned long line;
911
912 a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
913 page &= PAGE_MASK;
914 line = (page + PAGE_SIZE) - 0x100;
915 goto inside;
916 do {
917 line -= 0x100;
918 inside:
919 __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
920 "sta %%g0, [%0 + %2] %1\n\t"
921 "sta %%g0, [%0 + %3] %1\n\t"
922 "sta %%g0, [%0 + %4] %1\n\t"
923 "sta %%g0, [%0 + %5] %1\n\t"
924 "sta %%g0, [%0 + %6] %1\n\t"
925 "sta %%g0, [%0 + %7] %1\n\t"
926 "sta %%g0, [%0 + %8] %1\n\t" : :
927 "r" (line),
928 "i" (ASI_M_FLUSH_PAGE),
929 "r" (a), "r" (b), "r" (c), "r" (d),
930 "r" (e), "r" (f), "r" (g));
931 } while(line != page);
932}
933
934/* Cypress is also IO cache coherent. */
935static void cypress_flush_page_for_dma(unsigned long page)
936{
937}
938
 939/* Cypress has a unified L2 VIPT cache, in which both instructions and data
940 * are stored. It does not have an onboard icache of any sort, therefore
941 * no flush is necessary.
942 */
943static void cypress_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
944{
945}
946
947static void cypress_flush_tlb_all(void)
948{
949 srmmu_flush_whole_tlb();
950}
951
952static void cypress_flush_tlb_mm(struct mm_struct *mm)
953{
954 FLUSH_BEGIN(mm)
955 __asm__ __volatile__(
956 "lda [%0] %3, %%g5\n\t"
957 "sta %2, [%0] %3\n\t"
958 "sta %%g0, [%1] %4\n\t"
959 "sta %%g5, [%0] %3\n"
960 : /* no outputs */
961 : "r" (SRMMU_CTX_REG), "r" (0x300), "r" (mm->context),
962 "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
963 : "g5");
964 FLUSH_END
965}
966
967static void cypress_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
968{
969 struct mm_struct *mm = vma->vm_mm;
970 unsigned long size;
971
972 FLUSH_BEGIN(mm)
973 start &= SRMMU_PGDIR_MASK;
974 size = SRMMU_PGDIR_ALIGN(end) - start;
975 __asm__ __volatile__(
976 "lda [%0] %5, %%g5\n\t"
977 "sta %1, [%0] %5\n"
978 "1:\n\t"
979 "subcc %3, %4, %3\n\t"
980 "bne 1b\n\t"
981 " sta %%g0, [%2 + %3] %6\n\t"
982 "sta %%g5, [%0] %5\n"
983 : /* no outputs */
984 : "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (start | 0x200),
985 "r" (size), "r" (SRMMU_PGDIR_SIZE), "i" (ASI_M_MMUREGS),
986 "i" (ASI_M_FLUSH_PROBE)
987 : "g5", "cc");
988 FLUSH_END
989}
990
991static void cypress_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
992{
993 struct mm_struct *mm = vma->vm_mm;
994
995 FLUSH_BEGIN(mm)
996 __asm__ __volatile__(
997 "lda [%0] %3, %%g5\n\t"
998 "sta %1, [%0] %3\n\t"
999 "sta %%g0, [%2] %4\n\t"
1000 "sta %%g5, [%0] %3\n"
1001 : /* no outputs */
1002 : "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (page & PAGE_MASK),
1003 "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
1004 : "g5");
1005 FLUSH_END
1006}
1007
1008/* viking.S */ 563/* viking.S */
1009extern void viking_flush_cache_all(void); 564extern void viking_flush_cache_all(void);
1010extern void viking_flush_cache_mm(struct mm_struct *mm); 565extern void viking_flush_cache_mm(struct mm_struct *mm);
@@ -1065,21 +620,21 @@ static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
1065 620
1066 while(start < end) { 621 while(start < end) {
1067 pgdp = pgd_offset_k(start); 622 pgdp = pgd_offset_k(start);
1068 if(srmmu_pgd_none(*(pgd_t *)__nocache_fix(pgdp))) { 623 if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
1069 pmdp = (pmd_t *) __srmmu_get_nocache( 624 pmdp = (pmd_t *) __srmmu_get_nocache(
1070 SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE); 625 SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
1071 if (pmdp == NULL) 626 if (pmdp == NULL)
1072 early_pgtable_allocfail("pmd"); 627 early_pgtable_allocfail("pmd");
1073 memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE); 628 memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
1074 srmmu_pgd_set(__nocache_fix(pgdp), pmdp); 629 pgd_set(__nocache_fix(pgdp), pmdp);
1075 } 630 }
1076 pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start); 631 pmdp = pmd_offset(__nocache_fix(pgdp), start);
1077 if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) { 632 if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
1078 ptep = (pte_t *)__srmmu_get_nocache(PTE_SIZE, PTE_SIZE); 633 ptep = (pte_t *)__srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
1079 if (ptep == NULL) 634 if (ptep == NULL)
1080 early_pgtable_allocfail("pte"); 635 early_pgtable_allocfail("pte");
1081 memset(__nocache_fix(ptep), 0, PTE_SIZE); 636 memset(__nocache_fix(ptep), 0, PTE_SIZE);
1082 srmmu_pmd_set(__nocache_fix(pmdp), ptep); 637 pmd_set(__nocache_fix(pmdp), ptep);
1083 } 638 }
1084 if (start > (0xffffffffUL - PMD_SIZE)) 639 if (start > (0xffffffffUL - PMD_SIZE))
1085 break; 640 break;
@@ -1096,21 +651,21 @@ static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
1096 651
1097 while(start < end) { 652 while(start < end) {
1098 pgdp = pgd_offset_k(start); 653 pgdp = pgd_offset_k(start);
1099 if(srmmu_pgd_none(*pgdp)) { 654 if (pgd_none(*pgdp)) {
1100 pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE); 655 pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
1101 if (pmdp == NULL) 656 if (pmdp == NULL)
1102 early_pgtable_allocfail("pmd"); 657 early_pgtable_allocfail("pmd");
1103 memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE); 658 memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
1104 srmmu_pgd_set(pgdp, pmdp); 659 pgd_set(pgdp, pmdp);
1105 } 660 }
1106 pmdp = srmmu_pmd_offset(pgdp, start); 661 pmdp = pmd_offset(pgdp, start);
1107 if(srmmu_pmd_none(*pmdp)) { 662 if(srmmu_pmd_none(*pmdp)) {
1108 ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE, 663 ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
1109 PTE_SIZE); 664 PTE_SIZE);
1110 if (ptep == NULL) 665 if (ptep == NULL)
1111 early_pgtable_allocfail("pte"); 666 early_pgtable_allocfail("pte");
1112 memset(ptep, 0, PTE_SIZE); 667 memset(ptep, 0, PTE_SIZE);
1113 srmmu_pmd_set(pmdp, ptep); 668 pmd_set(pmdp, ptep);
1114 } 669 }
1115 if (start > (0xffffffffUL - PMD_SIZE)) 670 if (start > (0xffffffffUL - PMD_SIZE))
1116 break; 671 break;
@@ -1162,21 +717,21 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
1162 start += SRMMU_PGDIR_SIZE; 717 start += SRMMU_PGDIR_SIZE;
1163 continue; 718 continue;
1164 } 719 }
1165 if(srmmu_pgd_none(*(pgd_t *)__nocache_fix(pgdp))) { 720 if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
1166 pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE); 721 pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
1167 if (pmdp == NULL) 722 if (pmdp == NULL)
1168 early_pgtable_allocfail("pmd"); 723 early_pgtable_allocfail("pmd");
1169 memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE); 724 memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
1170 srmmu_pgd_set(__nocache_fix(pgdp), pmdp); 725 pgd_set(__nocache_fix(pgdp), pmdp);
1171 } 726 }
1172 pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start); 727 pmdp = pmd_offset(__nocache_fix(pgdp), start);
1173 if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) { 728 if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
1174 ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE, 729 ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
1175 PTE_SIZE); 730 PTE_SIZE);
1176 if (ptep == NULL) 731 if (ptep == NULL)
1177 early_pgtable_allocfail("pte"); 732 early_pgtable_allocfail("pte");
1178 memset(__nocache_fix(ptep), 0, PTE_SIZE); 733 memset(__nocache_fix(ptep), 0, PTE_SIZE);
1179 srmmu_pmd_set(__nocache_fix(pmdp), ptep); 734 pmd_set(__nocache_fix(pmdp), ptep);
1180 } 735 }
1181 if(what == 1) { 736 if(what == 1) {
1182 /* 737 /*
@@ -1190,7 +745,7 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
1190 start += SRMMU_REAL_PMD_SIZE; 745 start += SRMMU_REAL_PMD_SIZE;
1191 continue; 746 continue;
1192 } 747 }
1193 ptep = srmmu_pte_offset(__nocache_fix(pmdp), start); 748 ptep = pte_offset_kernel(__nocache_fix(pmdp), start);
1194 *(pte_t *)__nocache_fix(ptep) = __pte(prompte); 749 *(pte_t *)__nocache_fix(ptep) = __pte(prompte);
1195 start += PAGE_SIZE; 750 start += PAGE_SIZE;
1196 } 751 }
@@ -1231,13 +786,6 @@ static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
1231 return vstart; 786 return vstart;
1232} 787}
1233 788
1234static inline void memprobe_error(char *msg)
1235{
1236 prom_printf(msg);
1237 prom_printf("Halting now...\n");
1238 prom_halt();
1239}
1240
1241static inline void map_kernel(void) 789static inline void map_kernel(void)
1242{ 790{
1243 int i; 791 int i;
@@ -1249,8 +797,6 @@ static inline void map_kernel(void)
1249 for (i = 0; sp_banks[i].num_bytes != 0; i++) { 797 for (i = 0; sp_banks[i].num_bytes != 0; i++) {
1250 map_spbank((unsigned long)__va(sp_banks[i].base_addr), i); 798 map_spbank((unsigned long)__va(sp_banks[i].base_addr), i);
1251 } 799 }
1252
1253 BTFIXUPSET_SIMM13(user_ptrs_per_pgd, PAGE_OFFSET / SRMMU_PGDIR_SIZE);
1254} 800}
1255 801
1256/* Paging initialization on the Sparc Reference MMU. */ 802/* Paging initialization on the Sparc Reference MMU. */
@@ -1312,7 +858,7 @@ void __init srmmu_paging_init(void)
1312 srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys); 858 srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
1313#ifdef CONFIG_SMP 859#ifdef CONFIG_SMP
1314 /* Stop from hanging here... */ 860 /* Stop from hanging here... */
1315 local_flush_tlb_all(); 861 local_ops->tlb_all();
1316#else 862#else
1317 flush_tlb_all(); 863 flush_tlb_all();
1318#endif 864#endif
@@ -1326,8 +872,8 @@ void __init srmmu_paging_init(void)
1326 srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END); 872 srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END);
1327 873
1328 pgd = pgd_offset_k(PKMAP_BASE); 874 pgd = pgd_offset_k(PKMAP_BASE);
1329 pmd = srmmu_pmd_offset(pgd, PKMAP_BASE); 875 pmd = pmd_offset(pgd, PKMAP_BASE);
1330 pte = srmmu_pte_offset(pmd, PKMAP_BASE); 876 pte = pte_offset_kernel(pmd, PKMAP_BASE);
1331 pkmap_page_table = pte; 877 pkmap_page_table = pte;
1332 878
1333 flush_cache_all(); 879 flush_cache_all();
@@ -1359,7 +905,7 @@ void __init srmmu_paging_init(void)
1359 } 905 }
1360} 906}
1361 907
1362static void srmmu_mmu_info(struct seq_file *m) 908void mmu_info(struct seq_file *m)
1363{ 909{
1364 seq_printf(m, 910 seq_printf(m,
1365 "MMU type\t: %s\n" 911 "MMU type\t: %s\n"
@@ -1372,11 +918,7 @@ static void srmmu_mmu_info(struct seq_file *m)
1372 srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT); 918 srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
1373} 919}
1374 920
1375static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte) 921void destroy_context(struct mm_struct *mm)
1376{
1377}
1378
1379static void srmmu_destroy_context(struct mm_struct *mm)
1380{ 922{
1381 923
1382 if(mm->context != NO_CONTEXT) { 924 if(mm->context != NO_CONTEXT) {
@@ -1474,6 +1016,20 @@ static void __cpuinit poke_hypersparc(void)
1474 clear = srmmu_get_fstatus(); 1016 clear = srmmu_get_fstatus();
1475} 1017}
1476 1018
1019static const struct sparc32_cachetlb_ops hypersparc_ops = {
1020 .cache_all = hypersparc_flush_cache_all,
1021 .cache_mm = hypersparc_flush_cache_mm,
1022 .cache_page = hypersparc_flush_cache_page,
1023 .cache_range = hypersparc_flush_cache_range,
1024 .tlb_all = hypersparc_flush_tlb_all,
1025 .tlb_mm = hypersparc_flush_tlb_mm,
1026 .tlb_page = hypersparc_flush_tlb_page,
1027 .tlb_range = hypersparc_flush_tlb_range,
1028 .page_to_ram = hypersparc_flush_page_to_ram,
1029 .sig_insns = hypersparc_flush_sig_insns,
1030 .page_for_dma = hypersparc_flush_page_for_dma,
1031};
1032
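From the designated initializers above and the extern declarations earlier in this file, the shape of the new ops table can be inferred. The authoritative definition lives in a sparc32 header added by this series; the following is a reconstruction, not a quote of it:

struct mm_struct;
struct vm_area_struct;

struct sparc32_cachetlb_ops {
        void (*cache_all)(void);
        void (*cache_mm)(struct mm_struct *);
        void (*cache_page)(struct vm_area_struct *, unsigned long);
        void (*cache_range)(struct vm_area_struct *, unsigned long,
                            unsigned long);
        void (*tlb_all)(void);
        void (*tlb_mm)(struct mm_struct *);
        void (*tlb_page)(struct vm_area_struct *, unsigned long);
        void (*tlb_range)(struct vm_area_struct *, unsigned long,
                          unsigned long);
        void (*page_to_ram)(unsigned long);
        void (*sig_insns)(struct mm_struct *, unsigned long);
        void (*page_for_dma)(unsigned long);
};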
1477static void __init init_hypersparc(void) 1033static void __init init_hypersparc(void)
1478{ 1034{
1479 srmmu_name = "ROSS HyperSparc"; 1035 srmmu_name = "ROSS HyperSparc";
@@ -1482,118 +1038,13 @@ static void __init init_hypersparc(void)
1482 init_vac_layout(); 1038 init_vac_layout();
1483 1039
1484 is_hypersparc = 1; 1040 is_hypersparc = 1;
1485 1041 sparc32_cachetlb_ops = &hypersparc_ops;
1486 BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
1487 BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
1488 BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
1489 BTFIXUPSET_CALL(flush_cache_all, hypersparc_flush_cache_all, BTFIXUPCALL_NORM);
1490 BTFIXUPSET_CALL(flush_cache_mm, hypersparc_flush_cache_mm, BTFIXUPCALL_NORM);
1491 BTFIXUPSET_CALL(flush_cache_range, hypersparc_flush_cache_range, BTFIXUPCALL_NORM);
1492 BTFIXUPSET_CALL(flush_cache_page, hypersparc_flush_cache_page, BTFIXUPCALL_NORM);
1493
1494 BTFIXUPSET_CALL(flush_tlb_all, hypersparc_flush_tlb_all, BTFIXUPCALL_NORM);
1495 BTFIXUPSET_CALL(flush_tlb_mm, hypersparc_flush_tlb_mm, BTFIXUPCALL_NORM);
1496 BTFIXUPSET_CALL(flush_tlb_range, hypersparc_flush_tlb_range, BTFIXUPCALL_NORM);
1497 BTFIXUPSET_CALL(flush_tlb_page, hypersparc_flush_tlb_page, BTFIXUPCALL_NORM);
1498
1499 BTFIXUPSET_CALL(__flush_page_to_ram, hypersparc_flush_page_to_ram, BTFIXUPCALL_NORM);
1500 BTFIXUPSET_CALL(flush_sig_insns, hypersparc_flush_sig_insns, BTFIXUPCALL_NORM);
1501 BTFIXUPSET_CALL(flush_page_for_dma, hypersparc_flush_page_for_dma, BTFIXUPCALL_NOP);
1502
1503 1042
1504 poke_srmmu = poke_hypersparc; 1043 poke_srmmu = poke_hypersparc;
1505 1044
1506 hypersparc_setup_blockops(); 1045 hypersparc_setup_blockops();
1507} 1046}
1508 1047
1509static void __cpuinit poke_cypress(void)
1510{
1511 unsigned long mreg = srmmu_get_mmureg();
1512 unsigned long faddr, tagval;
1513 volatile unsigned long cypress_sucks;
1514 volatile unsigned long clear;
1515
1516 clear = srmmu_get_faddr();
1517 clear = srmmu_get_fstatus();
1518
1519 if (!(mreg & CYPRESS_CENABLE)) {
1520 for(faddr = 0x0; faddr < 0x10000; faddr += 20) {
1521 __asm__ __volatile__("sta %%g0, [%0 + %1] %2\n\t"
1522 "sta %%g0, [%0] %2\n\t" : :
1523 "r" (faddr), "r" (0x40000),
1524 "i" (ASI_M_DATAC_TAG));
1525 }
1526 } else {
1527 for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
1528 __asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
1529 "=r" (tagval) :
1530 "r" (faddr), "r" (0x40000),
1531 "i" (ASI_M_DATAC_TAG));
1532
1533 /* If modified and valid, kick it. */
1534 if((tagval & 0x60) == 0x60)
1535 cypress_sucks = *(unsigned long *)
1536 (0xf0020000 + faddr);
1537 }
1538 }
1539
1540 /* And one more, for our good neighbor, Mr. Broken Cypress. */
1541 clear = srmmu_get_faddr();
1542 clear = srmmu_get_fstatus();
1543
1544 mreg |= (CYPRESS_CENABLE | CYPRESS_CMODE);
1545 srmmu_set_mmureg(mreg);
1546}
1547
1548static void __init init_cypress_common(void)
1549{
1550 init_vac_layout();
1551
1552 BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
1553 BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
1554 BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
1555 BTFIXUPSET_CALL(flush_cache_all, cypress_flush_cache_all, BTFIXUPCALL_NORM);
1556 BTFIXUPSET_CALL(flush_cache_mm, cypress_flush_cache_mm, BTFIXUPCALL_NORM);
1557 BTFIXUPSET_CALL(flush_cache_range, cypress_flush_cache_range, BTFIXUPCALL_NORM);
1558 BTFIXUPSET_CALL(flush_cache_page, cypress_flush_cache_page, BTFIXUPCALL_NORM);
1559
1560 BTFIXUPSET_CALL(flush_tlb_all, cypress_flush_tlb_all, BTFIXUPCALL_NORM);
1561 BTFIXUPSET_CALL(flush_tlb_mm, cypress_flush_tlb_mm, BTFIXUPCALL_NORM);
1562 BTFIXUPSET_CALL(flush_tlb_page, cypress_flush_tlb_page, BTFIXUPCALL_NORM);
1563 BTFIXUPSET_CALL(flush_tlb_range, cypress_flush_tlb_range, BTFIXUPCALL_NORM);
1564
1565
1566 BTFIXUPSET_CALL(__flush_page_to_ram, cypress_flush_page_to_ram, BTFIXUPCALL_NORM);
1567 BTFIXUPSET_CALL(flush_sig_insns, cypress_flush_sig_insns, BTFIXUPCALL_NOP);
1568 BTFIXUPSET_CALL(flush_page_for_dma, cypress_flush_page_for_dma, BTFIXUPCALL_NOP);
1569
1570 poke_srmmu = poke_cypress;
1571}
1572
1573static void __init init_cypress_604(void)
1574{
1575 srmmu_name = "ROSS Cypress-604(UP)";
1576 srmmu_modtype = Cypress;
1577 init_cypress_common();
1578}
1579
1580static void __init init_cypress_605(unsigned long mrev)
1581{
1582 srmmu_name = "ROSS Cypress-605(MP)";
1583 if(mrev == 0xe) {
1584 srmmu_modtype = Cypress_vE;
1585 hwbug_bitmask |= HWBUG_COPYBACK_BROKEN;
1586 } else {
1587 if(mrev == 0xd) {
1588 srmmu_modtype = Cypress_vD;
1589 hwbug_bitmask |= HWBUG_ASIFLUSH_BROKEN;
1590 } else {
1591 srmmu_modtype = Cypress;
1592 }
1593 }
1594 init_cypress_common();
1595}
1596
1597static void __cpuinit poke_swift(void) 1048static void __cpuinit poke_swift(void)
1598{ 1049{
1599 unsigned long mreg; 1050 unsigned long mreg;
@@ -1617,6 +1068,20 @@ static void __cpuinit poke_swift(void)
1617 srmmu_set_mmureg(mreg); 1068 srmmu_set_mmureg(mreg);
1618} 1069}
1619 1070
1071static const struct sparc32_cachetlb_ops swift_ops = {
1072 .cache_all = swift_flush_cache_all,
1073 .cache_mm = swift_flush_cache_mm,
1074 .cache_page = swift_flush_cache_page,
1075 .cache_range = swift_flush_cache_range,
1076 .tlb_all = swift_flush_tlb_all,
1077 .tlb_mm = swift_flush_tlb_mm,
1078 .tlb_page = swift_flush_tlb_page,
1079 .tlb_range = swift_flush_tlb_range,
1080 .page_to_ram = swift_flush_page_to_ram,
1081 .sig_insns = swift_flush_sig_insns,
1082 .page_for_dma = swift_flush_page_for_dma,
1083};
1084
1620#define SWIFT_MASKID_ADDR 0x10003018 1085#define SWIFT_MASKID_ADDR 0x10003018
1621static void __init init_swift(void) 1086static void __init init_swift(void)
1622{ 1087{
@@ -1667,23 +1132,7 @@ static void __init init_swift(void)
1667 break; 1132 break;
1668 } 1133 }
1669 1134
1670 BTFIXUPSET_CALL(flush_cache_all, swift_flush_cache_all, BTFIXUPCALL_NORM); 1135 sparc32_cachetlb_ops = &swift_ops;
1671 BTFIXUPSET_CALL(flush_cache_mm, swift_flush_cache_mm, BTFIXUPCALL_NORM);
1672 BTFIXUPSET_CALL(flush_cache_page, swift_flush_cache_page, BTFIXUPCALL_NORM);
1673 BTFIXUPSET_CALL(flush_cache_range, swift_flush_cache_range, BTFIXUPCALL_NORM);
1674
1675
1676 BTFIXUPSET_CALL(flush_tlb_all, swift_flush_tlb_all, BTFIXUPCALL_NORM);
1677 BTFIXUPSET_CALL(flush_tlb_mm, swift_flush_tlb_mm, BTFIXUPCALL_NORM);
1678 BTFIXUPSET_CALL(flush_tlb_page, swift_flush_tlb_page, BTFIXUPCALL_NORM);
1679 BTFIXUPSET_CALL(flush_tlb_range, swift_flush_tlb_range, BTFIXUPCALL_NORM);
1680
1681 BTFIXUPSET_CALL(__flush_page_to_ram, swift_flush_page_to_ram, BTFIXUPCALL_NORM);
1682 BTFIXUPSET_CALL(flush_sig_insns, swift_flush_sig_insns, BTFIXUPCALL_NORM);
1683 BTFIXUPSET_CALL(flush_page_for_dma, swift_flush_page_for_dma, BTFIXUPCALL_NORM);
1684
1685 BTFIXUPSET_CALL(update_mmu_cache, swift_update_mmu_cache, BTFIXUPCALL_NORM);
1686
1687 flush_page_for_dma_global = 0; 1136 flush_page_for_dma_global = 0;
1688 1137
1689 /* 1138 /*
@@ -1816,26 +1265,25 @@ static void __cpuinit poke_turbosparc(void)
1816 srmmu_set_mmureg(mreg); 1265 srmmu_set_mmureg(mreg);
1817} 1266}
1818 1267
1268static const struct sparc32_cachetlb_ops turbosparc_ops = {
1269 .cache_all = turbosparc_flush_cache_all,
1270 .cache_mm = turbosparc_flush_cache_mm,
1271 .cache_page = turbosparc_flush_cache_page,
1272 .cache_range = turbosparc_flush_cache_range,
1273 .tlb_all = turbosparc_flush_tlb_all,
1274 .tlb_mm = turbosparc_flush_tlb_mm,
1275 .tlb_page = turbosparc_flush_tlb_page,
1276 .tlb_range = turbosparc_flush_tlb_range,
1277 .page_to_ram = turbosparc_flush_page_to_ram,
1278 .sig_insns = turbosparc_flush_sig_insns,
1279 .page_for_dma = turbosparc_flush_page_for_dma,
1280};
1281
1819static void __init init_turbosparc(void) 1282static void __init init_turbosparc(void)
1820{ 1283{
1821 srmmu_name = "Fujitsu TurboSparc"; 1284 srmmu_name = "Fujitsu TurboSparc";
1822 srmmu_modtype = TurboSparc; 1285 srmmu_modtype = TurboSparc;
1823 1286 sparc32_cachetlb_ops = &turbosparc_ops;
1824 BTFIXUPSET_CALL(flush_cache_all, turbosparc_flush_cache_all, BTFIXUPCALL_NORM);
1825 BTFIXUPSET_CALL(flush_cache_mm, turbosparc_flush_cache_mm, BTFIXUPCALL_NORM);
1826 BTFIXUPSET_CALL(flush_cache_page, turbosparc_flush_cache_page, BTFIXUPCALL_NORM);
1827 BTFIXUPSET_CALL(flush_cache_range, turbosparc_flush_cache_range, BTFIXUPCALL_NORM);
1828
1829 BTFIXUPSET_CALL(flush_tlb_all, turbosparc_flush_tlb_all, BTFIXUPCALL_NORM);
1830 BTFIXUPSET_CALL(flush_tlb_mm, turbosparc_flush_tlb_mm, BTFIXUPCALL_NORM);
1831 BTFIXUPSET_CALL(flush_tlb_page, turbosparc_flush_tlb_page, BTFIXUPCALL_NORM);
1832 BTFIXUPSET_CALL(flush_tlb_range, turbosparc_flush_tlb_range, BTFIXUPCALL_NORM);
1833
1834 BTFIXUPSET_CALL(__flush_page_to_ram, turbosparc_flush_page_to_ram, BTFIXUPCALL_NORM);
1835
1836 BTFIXUPSET_CALL(flush_sig_insns, turbosparc_flush_sig_insns, BTFIXUPCALL_NOP);
1837 BTFIXUPSET_CALL(flush_page_for_dma, turbosparc_flush_page_for_dma, BTFIXUPCALL_NORM);
1838
1839 poke_srmmu = poke_turbosparc; 1287 poke_srmmu = poke_turbosparc;
1840} 1288}
1841 1289
@@ -1850,6 +1298,20 @@ static void __cpuinit poke_tsunami(void)
1850 srmmu_set_mmureg(mreg); 1298 srmmu_set_mmureg(mreg);
1851} 1299}
1852 1300
1301static const struct sparc32_cachetlb_ops tsunami_ops = {
1302 .cache_all = tsunami_flush_cache_all,
1303 .cache_mm = tsunami_flush_cache_mm,
1304 .cache_page = tsunami_flush_cache_page,
1305 .cache_range = tsunami_flush_cache_range,
1306 .tlb_all = tsunami_flush_tlb_all,
1307 .tlb_mm = tsunami_flush_tlb_mm,
1308 .tlb_page = tsunami_flush_tlb_page,
1309 .tlb_range = tsunami_flush_tlb_range,
1310 .page_to_ram = tsunami_flush_page_to_ram,
1311 .sig_insns = tsunami_flush_sig_insns,
1312 .page_for_dma = tsunami_flush_page_for_dma,
1313};
1314
1853static void __init init_tsunami(void) 1315static void __init init_tsunami(void)
1854{ 1316{
1855 /* 1317 /*
@@ -1860,22 +1322,7 @@ static void __init init_tsunami(void)
1860 1322
1861 srmmu_name = "TI Tsunami"; 1323 srmmu_name = "TI Tsunami";
1862 srmmu_modtype = Tsunami; 1324 srmmu_modtype = Tsunami;
1863 1325 sparc32_cachetlb_ops = &tsunami_ops;
1864 BTFIXUPSET_CALL(flush_cache_all, tsunami_flush_cache_all, BTFIXUPCALL_NORM);
1865 BTFIXUPSET_CALL(flush_cache_mm, tsunami_flush_cache_mm, BTFIXUPCALL_NORM);
1866 BTFIXUPSET_CALL(flush_cache_page, tsunami_flush_cache_page, BTFIXUPCALL_NORM);
1867 BTFIXUPSET_CALL(flush_cache_range, tsunami_flush_cache_range, BTFIXUPCALL_NORM);
1868
1869
1870 BTFIXUPSET_CALL(flush_tlb_all, tsunami_flush_tlb_all, BTFIXUPCALL_NORM);
1871 BTFIXUPSET_CALL(flush_tlb_mm, tsunami_flush_tlb_mm, BTFIXUPCALL_NORM);
1872 BTFIXUPSET_CALL(flush_tlb_page, tsunami_flush_tlb_page, BTFIXUPCALL_NORM);
1873 BTFIXUPSET_CALL(flush_tlb_range, tsunami_flush_tlb_range, BTFIXUPCALL_NORM);
1874
1875 BTFIXUPSET_CALL(__flush_page_to_ram, tsunami_flush_page_to_ram, BTFIXUPCALL_NOP);
1876 BTFIXUPSET_CALL(flush_sig_insns, tsunami_flush_sig_insns, BTFIXUPCALL_NORM);
1877 BTFIXUPSET_CALL(flush_page_for_dma, tsunami_flush_page_for_dma, BTFIXUPCALL_NORM);
1878
1879 poke_srmmu = poke_tsunami; 1326 poke_srmmu = poke_tsunami;
1880 1327
1881 tsunami_setup_blockops(); 1328 tsunami_setup_blockops();
@@ -1886,7 +1333,7 @@ static void __cpuinit poke_viking(void)
1886 unsigned long mreg = srmmu_get_mmureg(); 1333 unsigned long mreg = srmmu_get_mmureg();
1887 static int smp_catch; 1334 static int smp_catch;
1888 1335
1889 if(viking_mxcc_present) { 1336 if (viking_mxcc_present) {
1890 unsigned long mxcc_control = mxcc_get_creg(); 1337 unsigned long mxcc_control = mxcc_get_creg();
1891 1338
1892 mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE); 1339 mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
@@ -1923,6 +1370,52 @@ static void __cpuinit poke_viking(void)
1923 srmmu_set_mmureg(mreg); 1370 srmmu_set_mmureg(mreg);
1924} 1371}
1925 1372
1373static struct sparc32_cachetlb_ops viking_ops = {
1374 .cache_all = viking_flush_cache_all,
1375 .cache_mm = viking_flush_cache_mm,
1376 .cache_page = viking_flush_cache_page,
1377 .cache_range = viking_flush_cache_range,
1378 .tlb_all = viking_flush_tlb_all,
1379 .tlb_mm = viking_flush_tlb_mm,
1380 .tlb_page = viking_flush_tlb_page,
1381 .tlb_range = viking_flush_tlb_range,
1382 .page_to_ram = viking_flush_page_to_ram,
1383 .sig_insns = viking_flush_sig_insns,
1384 .page_for_dma = viking_flush_page_for_dma,
1385};
1386
1387#ifdef CONFIG_SMP
1388/* On sun4d the cpu broadcasts local TLB flushes, so we can just
1389 * perform the local TLB flush and all the other cpus will see it.
1390 * But, unfortunately, there is a bug in the sun4d XBUS backplane
1391 * that requires that we add some synchronization to these flushes.
1392 *
1393 * The bug is that the fifo which keeps track of all the pending TLB
1394 * broadcasts in the system is an entry or two too small, so if we
1395 * have too many going at once we'll overflow that fifo and lose a TLB
1396 * flush resulting in corruption.
1397 *
1398 * Our workaround is to take a global spinlock around the TLB flushes,
 1399 * which guarantees we won't ever have too many pending. It's a big
 1400 * hammer, but a semaphore-like system to make sure we only have N TLB
 1401 * flushes going at once would require SMP locking anyway, so there's
1402 * no real value in trying any harder than this.
1403 */
1404static struct sparc32_cachetlb_ops viking_sun4d_smp_ops = {
1405 .cache_all = viking_flush_cache_all,
1406 .cache_mm = viking_flush_cache_mm,
1407 .cache_page = viking_flush_cache_page,
1408 .cache_range = viking_flush_cache_range,
1409 .tlb_all = sun4dsmp_flush_tlb_all,
1410 .tlb_mm = sun4dsmp_flush_tlb_mm,
1411 .tlb_page = sun4dsmp_flush_tlb_page,
1412 .tlb_range = sun4dsmp_flush_tlb_range,
1413 .page_to_ram = viking_flush_page_to_ram,
1414 .sig_insns = viking_flush_sig_insns,
1415 .page_for_dma = viking_flush_page_for_dma,
1416};
1417#endif
1418
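The workaround the comment above describes is mechanical: wrap every broadcast TLB flush in one global spinlock so the XBUS fifo never holds more than one pending broadcast. The real sun4dsmp_* entry points are defined elsewhere; this is a hypothetical sketch of the locking shape only:

static DEFINE_SPINLOCK(sun4d_xbus_tlb_lock);    /* hypothetical name */

static void sun4dsmp_flush_tlb_all_sketch(void)
{
        spin_lock(&sun4d_xbus_tlb_lock);        /* one broadcast in flight */
        viking_flush_tlb_all();                 /* hw broadcasts to all cpus */
        spin_unlock(&sun4d_xbus_tlb_lock);
}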
1926static void __init init_viking(void) 1419static void __init init_viking(void)
1927{ 1420{
1928 unsigned long mreg = srmmu_get_mmureg(); 1421 unsigned long mreg = srmmu_get_mmureg();
@@ -1933,10 +1426,6 @@ static void __init init_viking(void)
1933 viking_mxcc_present = 0; 1426 viking_mxcc_present = 0;
1934 msi_set_sync(); 1427 msi_set_sync();
1935 1428
1936 BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
1937 BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
1938 BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
1939
1940 /* 1429 /*
1941 * We need this to make sure old viking takes no hits 1430 * We need this to make sure old viking takes no hits
 1942 * on its cache for dma snoops to work around the 1431
@@ -1944,84 +1433,28 @@ static void __init init_viking(void)
1944 * This is only necessary because of the new way in 1433 * This is only necessary because of the new way in
1945 * which we use the IOMMU. 1434 * which we use the IOMMU.
1946 */ 1435 */
1947 BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page, BTFIXUPCALL_NORM); 1436 viking_ops.page_for_dma = viking_flush_page;
1948 1437#ifdef CONFIG_SMP
1438 viking_sun4d_smp_ops.page_for_dma = viking_flush_page;
1439#endif
1949 flush_page_for_dma_global = 0; 1440 flush_page_for_dma_global = 0;
1950 } else { 1441 } else {
1951 srmmu_name = "TI Viking/MXCC"; 1442 srmmu_name = "TI Viking/MXCC";
1952 viking_mxcc_present = 1; 1443 viking_mxcc_present = 1;
1953
1954 srmmu_cache_pagetables = 1; 1444 srmmu_cache_pagetables = 1;
1955
1956 /* MXCC vikings lack the DMA snooping bug. */
1957 BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page_for_dma, BTFIXUPCALL_NOP);
1958 } 1445 }
1959 1446
1960 BTFIXUPSET_CALL(flush_cache_all, viking_flush_cache_all, BTFIXUPCALL_NORM); 1447 sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
1961 BTFIXUPSET_CALL(flush_cache_mm, viking_flush_cache_mm, BTFIXUPCALL_NORM); 1448 &viking_ops;
1962 BTFIXUPSET_CALL(flush_cache_page, viking_flush_cache_page, BTFIXUPCALL_NORM);
1963 BTFIXUPSET_CALL(flush_cache_range, viking_flush_cache_range, BTFIXUPCALL_NORM);
1964
1965#ifdef CONFIG_SMP 1449#ifdef CONFIG_SMP
1966 if (sparc_cpu_model == sun4d) { 1450 if (sparc_cpu_model == sun4d)
1967 BTFIXUPSET_CALL(flush_tlb_all, sun4dsmp_flush_tlb_all, BTFIXUPCALL_NORM); 1451 sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
1968 BTFIXUPSET_CALL(flush_tlb_mm, sun4dsmp_flush_tlb_mm, BTFIXUPCALL_NORM); 1452 &viking_sun4d_smp_ops;
1969 BTFIXUPSET_CALL(flush_tlb_page, sun4dsmp_flush_tlb_page, BTFIXUPCALL_NORM);
1970 BTFIXUPSET_CALL(flush_tlb_range, sun4dsmp_flush_tlb_range, BTFIXUPCALL_NORM);
1971 } else
1972#endif 1453#endif
1973 {
1974 BTFIXUPSET_CALL(flush_tlb_all, viking_flush_tlb_all, BTFIXUPCALL_NORM);
1975 BTFIXUPSET_CALL(flush_tlb_mm, viking_flush_tlb_mm, BTFIXUPCALL_NORM);
1976 BTFIXUPSET_CALL(flush_tlb_page, viking_flush_tlb_page, BTFIXUPCALL_NORM);
1977 BTFIXUPSET_CALL(flush_tlb_range, viking_flush_tlb_range, BTFIXUPCALL_NORM);
1978 }
1979
1980 BTFIXUPSET_CALL(__flush_page_to_ram, viking_flush_page_to_ram, BTFIXUPCALL_NOP);
1981 BTFIXUPSET_CALL(flush_sig_insns, viking_flush_sig_insns, BTFIXUPCALL_NOP);
1982 1454
1983 poke_srmmu = poke_viking; 1455 poke_srmmu = poke_viking;
1984} 1456}
1985 1457
1986#ifdef CONFIG_SPARC_LEON
1987
1988void __init poke_leonsparc(void)
1989{
1990}
1991
1992void __init init_leon(void)
1993{
1994
1995 srmmu_name = "LEON";
1996
1997 BTFIXUPSET_CALL(flush_cache_all, leon_flush_cache_all,
1998 BTFIXUPCALL_NORM);
1999 BTFIXUPSET_CALL(flush_cache_mm, leon_flush_cache_all,
2000 BTFIXUPCALL_NORM);
2001 BTFIXUPSET_CALL(flush_cache_page, leon_flush_pcache_all,
2002 BTFIXUPCALL_NORM);
2003 BTFIXUPSET_CALL(flush_cache_range, leon_flush_cache_all,
2004 BTFIXUPCALL_NORM);
2005 BTFIXUPSET_CALL(flush_page_for_dma, leon_flush_dcache_all,
2006 BTFIXUPCALL_NORM);
2007
2008 BTFIXUPSET_CALL(flush_tlb_all, leon_flush_tlb_all, BTFIXUPCALL_NORM);
2009 BTFIXUPSET_CALL(flush_tlb_mm, leon_flush_tlb_all, BTFIXUPCALL_NORM);
2010 BTFIXUPSET_CALL(flush_tlb_page, leon_flush_tlb_all, BTFIXUPCALL_NORM);
2011 BTFIXUPSET_CALL(flush_tlb_range, leon_flush_tlb_all, BTFIXUPCALL_NORM);
2012
2013 BTFIXUPSET_CALL(__flush_page_to_ram, leon_flush_cache_all,
2014 BTFIXUPCALL_NOP);
2015 BTFIXUPSET_CALL(flush_sig_insns, leon_flush_cache_all, BTFIXUPCALL_NOP);
2016
2017 poke_srmmu = poke_leonsparc;
2018
2019 srmmu_cache_pagetables = 0;
2020
2021 leon_flush_during_switch = leon_flush_needed();
2022}
2023#endif
2024
2025/* Probe for the srmmu chip version. */ 1458/* Probe for the srmmu chip version. */
2026static void __init get_srmmu_type(void) 1459static void __init get_srmmu_type(void)
2027{ 1460{
@@ -2052,22 +1485,15 @@ static void __init get_srmmu_type(void)
2052 break; 1485 break;
2053 case 0: 1486 case 0:
2054 case 2: 1487 case 2:
2055 /* Uniprocessor Cypress */
2056 init_cypress_604();
2057 break;
2058 case 10: 1488 case 10:
2059 case 11: 1489 case 11:
2060 case 12: 1490 case 12:
2061 /* _REALLY OLD_ Cypress MP chips... */
2062 case 13: 1491 case 13:
2063 case 14: 1492 case 14:
2064 case 15: 1493 case 15:
2065 /* MP Cypress mmu/cache-controller */
2066 init_cypress_605(mod_rev);
2067 break;
2068 default: 1494 default:
 2069 /* Some other Cypress revision, assume a 605. */ 1495 prom_printf("Sparc-Linux Cypress support no longer exists.\n");
2070 init_cypress_605(mod_rev); 1496 prom_halt();
2071 break; 1497 break;
2072 } 1498 }
2073 return; 1499 return;
@@ -2123,203 +1549,193 @@ static void __init get_srmmu_type(void)
2123 srmmu_is_bad(); 1549 srmmu_is_bad();
2124} 1550}
2125 1551
2126/* don't laugh, static pagetables */ 1552#ifdef CONFIG_SMP
2127static void srmmu_check_pgt_cache(int low, int high) 1553/* Local cross-calls. */
1554static void smp_flush_page_for_dma(unsigned long page)
2128{ 1555{
1556 xc1((smpfunc_t) local_ops->page_for_dma, page);
1557 local_ops->page_for_dma(page);
2129} 1558}
2130 1559
2131extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme, 1560static void smp_flush_cache_all(void)
2132 tsetup_mmu_patchme, rtrap_mmu_patchme;
2133
2134extern unsigned long spwin_srmmu_stackchk, srmmu_fwin_stackchk,
2135 tsetup_srmmu_stackchk, srmmu_rett_stackchk;
2136
2137extern unsigned long srmmu_fault;
2138
2139#define PATCH_BRANCH(insn, dest) do { \
2140 iaddr = &(insn); \
2141 daddr = &(dest); \
2142 *iaddr = SPARC_BRANCH((unsigned long) daddr, (unsigned long) iaddr); \
2143 } while(0)
2144
2145static void __init patch_window_trap_handlers(void)
2146{ 1561{
2147 unsigned long *iaddr, *daddr; 1562 xc0((smpfunc_t) local_ops->cache_all);
2148 1563 local_ops->cache_all();
2149 PATCH_BRANCH(spwin_mmu_patchme, spwin_srmmu_stackchk);
2150 PATCH_BRANCH(fwin_mmu_patchme, srmmu_fwin_stackchk);
2151 PATCH_BRANCH(tsetup_mmu_patchme, tsetup_srmmu_stackchk);
2152 PATCH_BRANCH(rtrap_mmu_patchme, srmmu_rett_stackchk);
2153 PATCH_BRANCH(sparc_ttable[SP_TRAP_TFLT].inst_three, srmmu_fault);
2154 PATCH_BRANCH(sparc_ttable[SP_TRAP_DFLT].inst_three, srmmu_fault);
2155 PATCH_BRANCH(sparc_ttable[SP_TRAP_DACC].inst_three, srmmu_fault);
2156} 1564}
2157 1565
2158#ifdef CONFIG_SMP 1566static void smp_flush_tlb_all(void)
2159/* Local cross-calls. */
2160static void smp_flush_page_for_dma(unsigned long page)
2161{ 1567{
2162 xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_for_dma), page); 1568 xc0((smpfunc_t) local_ops->tlb_all);
2163 local_flush_page_for_dma(page); 1569 local_ops->tlb_all();
2164} 1570}
2165 1571
2166#endif 1572static void smp_flush_cache_mm(struct mm_struct *mm)
2167
2168static pte_t srmmu_pgoff_to_pte(unsigned long pgoff)
2169{ 1573{
2170 return __pte((pgoff << SRMMU_PTE_FILE_SHIFT) | SRMMU_FILE); 1574 if (mm->context != NO_CONTEXT) {
1575 cpumask_t cpu_mask;
1576 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1577 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1578 if (!cpumask_empty(&cpu_mask))
1579 xc1((smpfunc_t) local_ops->cache_mm, (unsigned long) mm);
1580 local_ops->cache_mm(mm);
1581 }
2171} 1582}
2172 1583
2173static unsigned long srmmu_pte_to_pgoff(pte_t pte) 1584static void smp_flush_tlb_mm(struct mm_struct *mm)
2174{ 1585{
2175 return pte_val(pte) >> SRMMU_PTE_FILE_SHIFT; 1586 if (mm->context != NO_CONTEXT) {
1587 cpumask_t cpu_mask;
1588 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1589 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1590 if (!cpumask_empty(&cpu_mask)) {
1591 xc1((smpfunc_t) local_ops->tlb_mm, (unsigned long) mm);
1592 if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
1593 cpumask_copy(mm_cpumask(mm),
1594 cpumask_of(smp_processor_id()));
1595 }
1596 local_ops->tlb_mm(mm);
1597 }
2176} 1598}
2177 1599
2178static pgprot_t srmmu_pgprot_noncached(pgprot_t prot) 1600static void smp_flush_cache_range(struct vm_area_struct *vma,
1601 unsigned long start,
1602 unsigned long end)
2179{ 1603{
2180 prot &= ~__pgprot(SRMMU_CACHE); 1604 struct mm_struct *mm = vma->vm_mm;
2181 1605
2182 return prot; 1606 if (mm->context != NO_CONTEXT) {
1607 cpumask_t cpu_mask;
1608 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1609 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1610 if (!cpumask_empty(&cpu_mask))
1611 xc3((smpfunc_t) local_ops->cache_range,
1612 (unsigned long) vma, start, end);
1613 local_ops->cache_range(vma, start, end);
1614 }
2183} 1615}
2184 1616
2185/* Load up routines and constants for sun4m and sun4d mmu */ 1617static void smp_flush_tlb_range(struct vm_area_struct *vma,
2186void __init ld_mmu_srmmu(void) 1618 unsigned long start,
1619 unsigned long end)
2187{ 1620{
2188 extern void ld_mmu_iommu(void); 1621 struct mm_struct *mm = vma->vm_mm;
2189 extern void ld_mmu_iounit(void);
2190 extern void ___xchg32_sun4md(void);
2191
2192 BTFIXUPSET_SIMM13(pgdir_shift, SRMMU_PGDIR_SHIFT);
2193 BTFIXUPSET_SETHI(pgdir_size, SRMMU_PGDIR_SIZE);
2194 BTFIXUPSET_SETHI(pgdir_mask, SRMMU_PGDIR_MASK);
2195
2196 BTFIXUPSET_SIMM13(ptrs_per_pmd, SRMMU_PTRS_PER_PMD);
2197 BTFIXUPSET_SIMM13(ptrs_per_pgd, SRMMU_PTRS_PER_PGD);
2198
2199 BTFIXUPSET_INT(page_none, pgprot_val(SRMMU_PAGE_NONE));
2200 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
2201 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
2202 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
2203 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
2204 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
2205 1622
2206 /* Functions */ 1623 if (mm->context != NO_CONTEXT) {
2207 BTFIXUPSET_CALL(pgprot_noncached, srmmu_pgprot_noncached, BTFIXUPCALL_NORM); 1624 cpumask_t cpu_mask;
2208#ifndef CONFIG_SMP 1625 cpumask_copy(&cpu_mask, mm_cpumask(mm));
2209 BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4md, BTFIXUPCALL_SWAPG1G2); 1626 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
2210#endif 1627 if (!cpumask_empty(&cpu_mask))
2211 BTFIXUPSET_CALL(do_check_pgt_cache, srmmu_check_pgt_cache, BTFIXUPCALL_NOP); 1628 xc3((smpfunc_t) local_ops->tlb_range,
1629 (unsigned long) vma, start, end);
1630 local_ops->tlb_range(vma, start, end);
1631 }
1632}
2212 1633
2213 BTFIXUPSET_CALL(set_pte, srmmu_set_pte, BTFIXUPCALL_SWAPO0O1); 1634static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
2214 BTFIXUPSET_CALL(switch_mm, srmmu_switch_mm, BTFIXUPCALL_NORM); 1635{
1636 struct mm_struct *mm = vma->vm_mm;
2215 1637
2216 BTFIXUPSET_CALL(pte_pfn, srmmu_pte_pfn, BTFIXUPCALL_NORM); 1638 if (mm->context != NO_CONTEXT) {
2217 BTFIXUPSET_CALL(pmd_page, srmmu_pmd_page, BTFIXUPCALL_NORM); 1639 cpumask_t cpu_mask;
2218 BTFIXUPSET_CALL(pgd_page_vaddr, srmmu_pgd_page, BTFIXUPCALL_NORM); 1640 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1641 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1642 if (!cpumask_empty(&cpu_mask))
1643 xc2((smpfunc_t) local_ops->cache_page,
1644 (unsigned long) vma, page);
1645 local_ops->cache_page(vma, page);
1646 }
1647}
2219 1648
2220 BTFIXUPSET_CALL(pte_present, srmmu_pte_present, BTFIXUPCALL_NORM); 1649static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
2221 BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_SWAPO0G0); 1650{
1651 struct mm_struct *mm = vma->vm_mm;
2222 1652
2223 BTFIXUPSET_CALL(pmd_bad, srmmu_pmd_bad, BTFIXUPCALL_NORM); 1653 if (mm->context != NO_CONTEXT) {
2224 BTFIXUPSET_CALL(pmd_present, srmmu_pmd_present, BTFIXUPCALL_NORM); 1654 cpumask_t cpu_mask;
2225 BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_SWAPO0G0); 1655 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1656 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1657 if (!cpumask_empty(&cpu_mask))
1658 xc2((smpfunc_t) local_ops->tlb_page,
1659 (unsigned long) vma, page);
1660 local_ops->tlb_page(vma, page);
1661 }
1662}
2226 1663
2227 BTFIXUPSET_CALL(pgd_none, srmmu_pgd_none, BTFIXUPCALL_NORM); 1664static void smp_flush_page_to_ram(unsigned long page)
2228 BTFIXUPSET_CALL(pgd_bad, srmmu_pgd_bad, BTFIXUPCALL_NORM); 1665{
2229 BTFIXUPSET_CALL(pgd_present, srmmu_pgd_present, BTFIXUPCALL_NORM); 1666 /* Current theory is that those who call this are the ones
2230 BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_SWAPO0G0); 1667 * who have just dirtied their cache with the page's contents
 1668 * in kernel space, therefore we only run this on the local cpu.
1669 *
1670 * XXX This experiment failed, research further... -DaveM
1671 */
1672#if 1
1673 xc1((smpfunc_t) local_ops->page_to_ram, page);
1674#endif
1675 local_ops->page_to_ram(page);
1676}
1677
1678static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
1679{
1680 cpumask_t cpu_mask;
1681 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1682 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1683 if (!cpumask_empty(&cpu_mask))
1684 xc2((smpfunc_t) local_ops->sig_insns,
1685 (unsigned long) mm, insn_addr);
1686 local_ops->sig_insns(mm, insn_addr);
1687}
1688
1689static struct sparc32_cachetlb_ops smp_cachetlb_ops = {
1690 .cache_all = smp_flush_cache_all,
1691 .cache_mm = smp_flush_cache_mm,
1692 .cache_page = smp_flush_cache_page,
1693 .cache_range = smp_flush_cache_range,
1694 .tlb_all = smp_flush_tlb_all,
1695 .tlb_mm = smp_flush_tlb_mm,
1696 .tlb_page = smp_flush_tlb_page,
1697 .tlb_range = smp_flush_tlb_range,
1698 .page_to_ram = smp_flush_page_to_ram,
1699 .sig_insns = smp_flush_sig_insns,
1700 .page_for_dma = smp_flush_page_for_dma,
1701};
1702#endif
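/* Callers are expected to reach these operations through the ops table
 * rather than by direct calls; a minimal sketch of the presumed wrapper
 * style (the exact wrapper names in the flush headers are assumptions):
 *
 *	#define flush_cache_mm(mm)	sparc32_cachetlb_ops->cache_mm(mm)
 *	#define flush_tlb_all()		sparc32_cachetlb_ops->tlb_all()
 */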
2231 1703
2232 BTFIXUPSET_CALL(mk_pte, srmmu_mk_pte, BTFIXUPCALL_NORM); 1704/* Load up routines and constants for sun4m and sun4d mmu */
2233 BTFIXUPSET_CALL(mk_pte_phys, srmmu_mk_pte_phys, BTFIXUPCALL_NORM); 1705void __init load_mmu(void)
2234 BTFIXUPSET_CALL(mk_pte_io, srmmu_mk_pte_io, BTFIXUPCALL_NORM); 1706{
2235 BTFIXUPSET_CALL(pgd_set, srmmu_pgd_set, BTFIXUPCALL_NORM); 1707 extern void ld_mmu_iommu(void);
2236 BTFIXUPSET_CALL(pmd_set, srmmu_pmd_set, BTFIXUPCALL_NORM); 1708 extern void ld_mmu_iounit(void);
2237 BTFIXUPSET_CALL(pmd_populate, srmmu_pmd_populate, BTFIXUPCALL_NORM);
2238
2239 BTFIXUPSET_INT(pte_modify_mask, SRMMU_CHG_MASK);
2240 BTFIXUPSET_CALL(pmd_offset, srmmu_pmd_offset, BTFIXUPCALL_NORM);
2241 BTFIXUPSET_CALL(pte_offset_kernel, srmmu_pte_offset, BTFIXUPCALL_NORM);
2242
2243 BTFIXUPSET_CALL(free_pte_fast, srmmu_free_pte_fast, BTFIXUPCALL_NORM);
2244 BTFIXUPSET_CALL(pte_free, srmmu_pte_free, BTFIXUPCALL_NORM);
2245 BTFIXUPSET_CALL(pte_alloc_one_kernel, srmmu_pte_alloc_one_kernel, BTFIXUPCALL_NORM);
2246 BTFIXUPSET_CALL(pte_alloc_one, srmmu_pte_alloc_one, BTFIXUPCALL_NORM);
2247 BTFIXUPSET_CALL(free_pmd_fast, srmmu_pmd_free, BTFIXUPCALL_NORM);
2248 BTFIXUPSET_CALL(pmd_alloc_one, srmmu_pmd_alloc_one, BTFIXUPCALL_NORM);
2249 BTFIXUPSET_CALL(free_pgd_fast, srmmu_free_pgd_fast, BTFIXUPCALL_NORM);
2250 BTFIXUPSET_CALL(get_pgd_fast, srmmu_get_pgd_fast, BTFIXUPCALL_NORM);
2251
2252 BTFIXUPSET_HALF(pte_writei, SRMMU_WRITE);
2253 BTFIXUPSET_HALF(pte_dirtyi, SRMMU_DIRTY);
2254 BTFIXUPSET_HALF(pte_youngi, SRMMU_REF);
2255 BTFIXUPSET_HALF(pte_filei, SRMMU_FILE);
2256 BTFIXUPSET_HALF(pte_wrprotecti, SRMMU_WRITE);
2257 BTFIXUPSET_HALF(pte_mkcleani, SRMMU_DIRTY);
2258 BTFIXUPSET_HALF(pte_mkoldi, SRMMU_REF);
2259 BTFIXUPSET_CALL(pte_mkwrite, srmmu_pte_mkwrite, BTFIXUPCALL_ORINT(SRMMU_WRITE));
2260 BTFIXUPSET_CALL(pte_mkdirty, srmmu_pte_mkdirty, BTFIXUPCALL_ORINT(SRMMU_DIRTY));
2261 BTFIXUPSET_CALL(pte_mkyoung, srmmu_pte_mkyoung, BTFIXUPCALL_ORINT(SRMMU_REF));
2262 BTFIXUPSET_CALL(update_mmu_cache, srmmu_update_mmu_cache, BTFIXUPCALL_NOP);
2263 BTFIXUPSET_CALL(destroy_context, srmmu_destroy_context, BTFIXUPCALL_NORM);
2264
2265 BTFIXUPSET_CALL(sparc_mapiorange, srmmu_mapiorange, BTFIXUPCALL_NORM);
2266 BTFIXUPSET_CALL(sparc_unmapiorange, srmmu_unmapiorange, BTFIXUPCALL_NORM);
2267
2268 BTFIXUPSET_CALL(__swp_type, srmmu_swp_type, BTFIXUPCALL_NORM);
2269 BTFIXUPSET_CALL(__swp_offset, srmmu_swp_offset, BTFIXUPCALL_NORM);
2270 BTFIXUPSET_CALL(__swp_entry, srmmu_swp_entry, BTFIXUPCALL_NORM);
2271
2272 BTFIXUPSET_CALL(mmu_info, srmmu_mmu_info, BTFIXUPCALL_NORM);
2273
2274 BTFIXUPSET_CALL(alloc_thread_info_node, srmmu_alloc_thread_info_node, BTFIXUPCALL_NORM);
2275 BTFIXUPSET_CALL(free_thread_info, srmmu_free_thread_info, BTFIXUPCALL_NORM);
2276
2277 BTFIXUPSET_CALL(pte_to_pgoff, srmmu_pte_to_pgoff, BTFIXUPCALL_NORM);
2278 BTFIXUPSET_CALL(pgoff_to_pte, srmmu_pgoff_to_pte, BTFIXUPCALL_NORM);
2279 1709
1710 /* Functions */
2280 get_srmmu_type(); 1711 get_srmmu_type();
2281 patch_window_trap_handlers();
2282 1712
2283#ifdef CONFIG_SMP 1713#ifdef CONFIG_SMP
2284 /* El switcheroo... */ 1714 /* El switcheroo... */
1715 local_ops = sparc32_cachetlb_ops;
2285 1716
2286 BTFIXUPCOPY_CALL(local_flush_cache_all, flush_cache_all); 1717 if (sparc_cpu_model == sun4d || sparc_cpu_model == sparc_leon) {
2287 BTFIXUPCOPY_CALL(local_flush_cache_mm, flush_cache_mm); 1718 smp_cachetlb_ops.tlb_all = local_ops->tlb_all;
2288 BTFIXUPCOPY_CALL(local_flush_cache_range, flush_cache_range); 1719 smp_cachetlb_ops.tlb_mm = local_ops->tlb_mm;
2289 BTFIXUPCOPY_CALL(local_flush_cache_page, flush_cache_page); 1720 smp_cachetlb_ops.tlb_range = local_ops->tlb_range;
2290 BTFIXUPCOPY_CALL(local_flush_tlb_all, flush_tlb_all); 1721 smp_cachetlb_ops.tlb_page = local_ops->tlb_page;
2291 BTFIXUPCOPY_CALL(local_flush_tlb_mm, flush_tlb_mm);
2292 BTFIXUPCOPY_CALL(local_flush_tlb_range, flush_tlb_range);
2293 BTFIXUPCOPY_CALL(local_flush_tlb_page, flush_tlb_page);
2294 BTFIXUPCOPY_CALL(local_flush_page_to_ram, __flush_page_to_ram);
2295 BTFIXUPCOPY_CALL(local_flush_sig_insns, flush_sig_insns);
2296 BTFIXUPCOPY_CALL(local_flush_page_for_dma, flush_page_for_dma);
2297
2298 BTFIXUPSET_CALL(flush_cache_all, smp_flush_cache_all, BTFIXUPCALL_NORM);
2299 BTFIXUPSET_CALL(flush_cache_mm, smp_flush_cache_mm, BTFIXUPCALL_NORM);
2300 BTFIXUPSET_CALL(flush_cache_range, smp_flush_cache_range, BTFIXUPCALL_NORM);
2301 BTFIXUPSET_CALL(flush_cache_page, smp_flush_cache_page, BTFIXUPCALL_NORM);
2302 if (sparc_cpu_model != sun4d &&
2303 sparc_cpu_model != sparc_leon) {
2304 BTFIXUPSET_CALL(flush_tlb_all, smp_flush_tlb_all, BTFIXUPCALL_NORM);
2305 BTFIXUPSET_CALL(flush_tlb_mm, smp_flush_tlb_mm, BTFIXUPCALL_NORM);
2306 BTFIXUPSET_CALL(flush_tlb_range, smp_flush_tlb_range, BTFIXUPCALL_NORM);
2307 BTFIXUPSET_CALL(flush_tlb_page, smp_flush_tlb_page, BTFIXUPCALL_NORM);
2308 } 1722 }
2309 BTFIXUPSET_CALL(__flush_page_to_ram, smp_flush_page_to_ram, BTFIXUPCALL_NORM);
2310 BTFIXUPSET_CALL(flush_sig_insns, smp_flush_sig_insns, BTFIXUPCALL_NORM);
2311 BTFIXUPSET_CALL(flush_page_for_dma, smp_flush_page_for_dma, BTFIXUPCALL_NORM);
2312 1723
2313 if (poke_srmmu == poke_viking) { 1724 if (poke_srmmu == poke_viking) {
2314 /* Avoid unnecessary cross calls. */ 1725 /* Avoid unnecessary cross calls. */
2315 BTFIXUPCOPY_CALL(flush_cache_all, local_flush_cache_all); 1726 smp_cachetlb_ops.cache_all = local_ops->cache_all;
2316 BTFIXUPCOPY_CALL(flush_cache_mm, local_flush_cache_mm); 1727 smp_cachetlb_ops.cache_mm = local_ops->cache_mm;
2317 BTFIXUPCOPY_CALL(flush_cache_range, local_flush_cache_range); 1728 smp_cachetlb_ops.cache_range = local_ops->cache_range;
2318 BTFIXUPCOPY_CALL(flush_cache_page, local_flush_cache_page); 1729 smp_cachetlb_ops.cache_page = local_ops->cache_page;
2319 BTFIXUPCOPY_CALL(__flush_page_to_ram, local_flush_page_to_ram); 1730
2320 BTFIXUPCOPY_CALL(flush_sig_insns, local_flush_sig_insns); 1731 smp_cachetlb_ops.page_to_ram = local_ops->page_to_ram;
2321 BTFIXUPCOPY_CALL(flush_page_for_dma, local_flush_page_for_dma); 1732 smp_cachetlb_ops.sig_insns = local_ops->sig_insns;
1733 smp_cachetlb_ops.page_for_dma = local_ops->page_for_dma;
2322 } 1734 }
1735
1736 /* It really is const after this point. */
1737 sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
1738 &smp_cachetlb_ops;
2323#endif 1739#endif
2324 1740
2325 if (sparc_cpu_model == sun4d) 1741 if (sparc_cpu_model == sun4d)
diff --git a/arch/sparc/mm/srmmu.h b/arch/sparc/mm/srmmu.h
new file mode 100644
index 000000000000..5703274ccf89
--- /dev/null
+++ b/arch/sparc/mm/srmmu.h
@@ -0,0 +1,4 @@
1/* srmmu.c */
2extern char *srmmu_name;
3
4extern void (*poke_srmmu)(void);
diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c
deleted file mode 100644
index 1cf4f198709a..000000000000
--- a/arch/sparc/mm/sun4c.c
+++ /dev/null
@@ -1,2166 +0,0 @@
1/* sun4c.c: Doing in software what should be done in hardware.
2 *
3 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
4 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
5 * Copyright (C) 1996 Andrew Tridgell (Andrew.Tridgell@anu.edu.au)
6 * Copyright (C) 1997-2000 Anton Blanchard (anton@samba.org)
7 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8 */
9
10#define NR_TASK_BUCKETS 512
11
12#include <linux/kernel.h>
13#include <linux/mm.h>
14#include <linux/init.h>
15#include <linux/slab.h>
16#include <linux/bootmem.h>
17#include <linux/highmem.h>
18#include <linux/fs.h>
19#include <linux/seq_file.h>
20#include <linux/scatterlist.h>
21#include <linux/bitmap.h>
22
23#include <asm/sections.h>
24#include <asm/page.h>
25#include <asm/pgalloc.h>
26#include <asm/pgtable.h>
27#include <asm/vaddrs.h>
28#include <asm/idprom.h>
29#include <asm/machines.h>
30#include <asm/memreg.h>
31#include <asm/processor.h>
32#include <asm/auxio.h>
33#include <asm/io.h>
34#include <asm/oplib.h>
35#include <asm/openprom.h>
36#include <asm/mmu_context.h>
37#include <asm/highmem.h>
38#include <asm/btfixup.h>
39#include <asm/cacheflush.h>
40#include <asm/tlbflush.h>
41
42/* Because of our dynamic kernel TLB miss strategy, and how
43 * our DVMA mapping allocation works, you _MUST_:
44 *
45 * 1) Disable interrupts _and_ not touch any dynamic kernel
46 * memory while messing with kernel MMU state. By
47 * dynamic memory I mean any object which is not in
48 * the kernel image itself or a thread_union (both of
49 * which are locked into the MMU).
50 * 2) Disable interrupts while messing with user MMU state.
51 */
52
53extern int num_segmaps, num_contexts;
54
55extern unsigned long page_kernel;
56
57/* That's it, we prom_halt() on sun4c if the cache size is something other than 65536.
58 * So let's save some cycles and just use that everywhere except for that bootup
59 * sanity check.
60 */
61#define SUN4C_VAC_SIZE 65536
62
63#define SUN4C_KERNEL_BUCKETS 32
64
65/* Flushing the cache. */
66struct sun4c_vac_props sun4c_vacinfo;
67unsigned long sun4c_kernel_faults;
68
69/* Invalidate every sun4c cache line tag. */
70static void __init sun4c_flush_all(void)
71{
72 unsigned long begin, end;
73
74 if (sun4c_vacinfo.on)
75 panic("SUN4C: AIEEE, trying to invalidate vac while it is on.");
76
77 /* Clear 'valid' bit in all cache line tags */
78 begin = AC_CACHETAGS;
79 end = (AC_CACHETAGS + SUN4C_VAC_SIZE);
80 while (begin < end) {
81 __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
82 "r" (begin), "i" (ASI_CONTROL));
83 begin += sun4c_vacinfo.linesize;
84 }
85}
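/* Each "sta %g0, [tag] ASI_CONTROL" store above zeroes one tag word,
 * dropping its valid bit; stepping by linesize walks every tag in the
 * 64K VAC exactly once.
 */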
86
87static void sun4c_flush_context_hw(void)
88{
89 unsigned long end = SUN4C_VAC_SIZE;
90
91 __asm__ __volatile__(
92 "1: addcc %0, -4096, %0\n\t"
93 " bne 1b\n\t"
94 " sta %%g0, [%0] %2"
95 : "=&r" (end)
96 : "0" (end), "i" (ASI_HWFLUSHCONTEXT)
97 : "cc");
98}
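/* A rough C rendering of the hardware flush loop above, assuming a
 * hypothetical sun4c_hwflush_ctx() helper standing in for the
 * "sta %g0, [addr] ASI_HWFLUSHCONTEXT" store (which sits in the branch
 * delay slot and therefore runs on every pass, including the last):
 */
static void sun4c_flush_context_hw_sketch(void)
{
	unsigned long addr = SUN4C_VAC_SIZE;

	do {
		addr -= 4096;
		sun4c_hwflush_ctx(addr);	/* hypothetical helper */
	} while (addr != 0);
}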
99
100/* Must be called minimally with IRQs disabled. */
101static void sun4c_flush_segment_hw(unsigned long addr)
102{
103 if (sun4c_get_segmap(addr) != invalid_segment) {
104 unsigned long vac_size = SUN4C_VAC_SIZE;
105
106 __asm__ __volatile__(
107 "1: addcc %0, -4096, %0\n\t"
108 " bne 1b\n\t"
109 " sta %%g0, [%2 + %0] %3"
110 : "=&r" (vac_size)
111 : "0" (vac_size), "r" (addr), "i" (ASI_HWFLUSHSEG)
112 : "cc");
113 }
114}
115
116/* File local boot time fixups. */
117BTFIXUPDEF_CALL(void, sun4c_flush_page, unsigned long)
118BTFIXUPDEF_CALL(void, sun4c_flush_segment, unsigned long)
119BTFIXUPDEF_CALL(void, sun4c_flush_context, void)
120
121#define sun4c_flush_page(addr) BTFIXUP_CALL(sun4c_flush_page)(addr)
122#define sun4c_flush_segment(addr) BTFIXUP_CALL(sun4c_flush_segment)(addr)
123#define sun4c_flush_context() BTFIXUP_CALL(sun4c_flush_context)()
124
125/* Must be called minimally with interrupts disabled. */
126static void sun4c_flush_page_hw(unsigned long addr)
127{
128 addr &= PAGE_MASK;
129 if ((int)sun4c_get_pte(addr) < 0)
130 __asm__ __volatile__("sta %%g0, [%0] %1"
131 : : "r" (addr), "i" (ASI_HWFLUSHPAGE));
132}
133
134/* Don't inline the software version as it eats too many cache lines if expanded. */
135static void sun4c_flush_context_sw(void)
136{
137 unsigned long nbytes = SUN4C_VAC_SIZE;
138 unsigned long lsize = sun4c_vacinfo.linesize;
139
140 __asm__ __volatile__(
141 "add %2, %2, %%g1\n\t"
142 "add %2, %%g1, %%g2\n\t"
143 "add %2, %%g2, %%g3\n\t"
144 "add %2, %%g3, %%g4\n\t"
145 "add %2, %%g4, %%g5\n\t"
146 "add %2, %%g5, %%o4\n\t"
147 "add %2, %%o4, %%o5\n"
148 "1:\n\t"
149 "subcc %0, %%o5, %0\n\t"
150 "sta %%g0, [%0] %3\n\t"
151 "sta %%g0, [%0 + %2] %3\n\t"
152 "sta %%g0, [%0 + %%g1] %3\n\t"
153 "sta %%g0, [%0 + %%g2] %3\n\t"
154 "sta %%g0, [%0 + %%g3] %3\n\t"
155 "sta %%g0, [%0 + %%g4] %3\n\t"
156 "sta %%g0, [%0 + %%g5] %3\n\t"
157 "bg 1b\n\t"
158 " sta %%g0, [%1 + %%o4] %3\n"
159 : "=&r" (nbytes)
160 : "0" (nbytes), "r" (lsize), "i" (ASI_FLUSHCTX)
161 : "g1", "g2", "g3", "g4", "g5", "o4", "o5", "cc");
162}
163
164/* Don't inline the software version as it eats too many cache lines if expanded. */
165static void sun4c_flush_segment_sw(unsigned long addr)
166{
167 if (sun4c_get_segmap(addr) != invalid_segment) {
168 unsigned long nbytes = SUN4C_VAC_SIZE;
169 unsigned long lsize = sun4c_vacinfo.linesize;
170
171 __asm__ __volatile__(
172 "add %2, %2, %%g1\n\t"
173 "add %2, %%g1, %%g2\n\t"
174 "add %2, %%g2, %%g3\n\t"
175 "add %2, %%g3, %%g4\n\t"
176 "add %2, %%g4, %%g5\n\t"
177 "add %2, %%g5, %%o4\n\t"
178 "add %2, %%o4, %%o5\n"
179 "1:\n\t"
180 "subcc %1, %%o5, %1\n\t"
181 "sta %%g0, [%0] %6\n\t"
182 "sta %%g0, [%0 + %2] %6\n\t"
183 "sta %%g0, [%0 + %%g1] %6\n\t"
184 "sta %%g0, [%0 + %%g2] %6\n\t"
185 "sta %%g0, [%0 + %%g3] %6\n\t"
186 "sta %%g0, [%0 + %%g4] %6\n\t"
187 "sta %%g0, [%0 + %%g5] %6\n\t"
188 "sta %%g0, [%0 + %%o4] %6\n\t"
189 "bg 1b\n\t"
190 " add %0, %%o5, %0\n"
191 : "=&r" (addr), "=&r" (nbytes), "=&r" (lsize)
192 : "0" (addr), "1" (nbytes), "2" (lsize),
193 "i" (ASI_FLUSHSEG)
194 : "g1", "g2", "g3", "g4", "g5", "o4", "o5", "cc");
195 }
196}
197
198/* Don't inline the software version as it eats too many cache lines if expanded. */
199static void sun4c_flush_page_sw(unsigned long addr)
200{
201 addr &= PAGE_MASK;
202 if ((sun4c_get_pte(addr) & (_SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_VALID)) ==
203 _SUN4C_PAGE_VALID) {
204 unsigned long left = PAGE_SIZE;
205 unsigned long lsize = sun4c_vacinfo.linesize;
206
207 __asm__ __volatile__(
208 "add %2, %2, %%g1\n\t"
209 "add %2, %%g1, %%g2\n\t"
210 "add %2, %%g2, %%g3\n\t"
211 "add %2, %%g3, %%g4\n\t"
212 "add %2, %%g4, %%g5\n\t"
213 "add %2, %%g5, %%o4\n\t"
214 "add %2, %%o4, %%o5\n"
215 "1:\n\t"
216 "subcc %1, %%o5, %1\n\t"
217 "sta %%g0, [%0] %6\n\t"
218 "sta %%g0, [%0 + %2] %6\n\t"
219 "sta %%g0, [%0 + %%g1] %6\n\t"
220 "sta %%g0, [%0 + %%g2] %6\n\t"
221 "sta %%g0, [%0 + %%g3] %6\n\t"
222 "sta %%g0, [%0 + %%g4] %6\n\t"
223 "sta %%g0, [%0 + %%g5] %6\n\t"
224 "sta %%g0, [%0 + %%o4] %6\n\t"
225 "bg 1b\n\t"
226 " add %0, %%o5, %0\n"
227 : "=&r" (addr), "=&r" (left), "=&r" (lsize)
228 : "0" (addr), "1" (left), "2" (lsize),
229 "i" (ASI_FLUSHPG)
230 : "g1", "g2", "g3", "g4", "g5", "o4", "o5", "cc");
231 }
232}
233
234/* The sun4cs do have an on-chip store buffer. And the way you
235 * clear it out isn't so obvious. The only way I can think of
236 * to accomplish this is to read the current context register,
237 * store the same value there, then read an external hardware
238 * register.
239 */
240void sun4c_complete_all_stores(void)
241{
242 volatile int _unused;
243
244 _unused = sun4c_get_context();
245 sun4c_set_context(_unused);
246 _unused = get_auxio();
247}
248
249/* Bootup utility functions. */
250static inline void sun4c_init_clean_segmap(unsigned char pseg)
251{
252 unsigned long vaddr;
253
254 sun4c_put_segmap(0, pseg);
255 for (vaddr = 0; vaddr < SUN4C_REAL_PGDIR_SIZE; vaddr += PAGE_SIZE)
256 sun4c_put_pte(vaddr, 0);
257 sun4c_put_segmap(0, invalid_segment);
258}
259
260static inline void sun4c_init_clean_mmu(unsigned long kernel_end)
261{
262 unsigned long vaddr;
263 unsigned char savectx, ctx;
264
265 savectx = sun4c_get_context();
266 for (ctx = 0; ctx < num_contexts; ctx++) {
267 sun4c_set_context(ctx);
268 for (vaddr = 0; vaddr < 0x20000000; vaddr += SUN4C_REAL_PGDIR_SIZE)
269 sun4c_put_segmap(vaddr, invalid_segment);
270 for (vaddr = 0xe0000000; vaddr < KERNBASE; vaddr += SUN4C_REAL_PGDIR_SIZE)
271 sun4c_put_segmap(vaddr, invalid_segment);
272 for (vaddr = kernel_end; vaddr < KADB_DEBUGGER_BEGVM; vaddr += SUN4C_REAL_PGDIR_SIZE)
273 sun4c_put_segmap(vaddr, invalid_segment);
274 for (vaddr = LINUX_OPPROM_ENDVM; vaddr; vaddr += SUN4C_REAL_PGDIR_SIZE)
275 sun4c_put_segmap(vaddr, invalid_segment);
276 }
277 sun4c_set_context(savectx);
278}
279
280void __init sun4c_probe_vac(void)
281{
282 sun4c_disable_vac();
283
284 if ((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) ||
285 (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) {
286 /* PROM on SS1 lacks this info, to be super safe we
287 * hard code it here since this arch is cast in stone.
288 */
289 sun4c_vacinfo.num_bytes = 65536;
290 sun4c_vacinfo.linesize = 16;
291 } else {
292 sun4c_vacinfo.num_bytes =
293 prom_getintdefault(prom_root_node, "vac-size", 65536);
294 sun4c_vacinfo.linesize =
295 prom_getintdefault(prom_root_node, "vac-linesize", 16);
296 }
297 sun4c_vacinfo.do_hwflushes =
298 prom_getintdefault(prom_root_node, "vac-hwflush", 0);
299
300 if (sun4c_vacinfo.do_hwflushes == 0)
301 sun4c_vacinfo.do_hwflushes =
302 prom_getintdefault(prom_root_node, "vac_hwflush", 0);
303
304 if (sun4c_vacinfo.num_bytes != 65536) {
305 prom_printf("WEIRD Sun4C VAC cache size, "
306 "tell sparclinux@vger.kernel.org");
307 prom_halt();
308 }
309
310 switch (sun4c_vacinfo.linesize) {
311 case 16:
312 sun4c_vacinfo.log2lsize = 4;
313 break;
314 case 32:
315 sun4c_vacinfo.log2lsize = 5;
316 break;
317 default:
318 prom_printf("probe_vac: Didn't expect vac-linesize of %d, halting\n",
319 sun4c_vacinfo.linesize);
320 prom_halt();
321 }
322
323 sun4c_flush_all();
324 sun4c_enable_vac();
325}
326
327/* Patch instructions for the low level kernel fault handler. */
328extern unsigned long invalid_segment_patch1, invalid_segment_patch1_ff;
329extern unsigned long invalid_segment_patch2, invalid_segment_patch2_ff;
330extern unsigned long invalid_segment_patch1_1ff, invalid_segment_patch2_1ff;
331extern unsigned long num_context_patch1, num_context_patch1_16;
332extern unsigned long num_context_patch2_16;
333extern unsigned long vac_linesize_patch, vac_linesize_patch_32;
334extern unsigned long vac_hwflush_patch1, vac_hwflush_patch1_on;
335extern unsigned long vac_hwflush_patch2, vac_hwflush_patch2_on;
336
337#define PATCH_INSN(src, dst) do { \
338 daddr = &(dst); \
339 iaddr = &(src); \
340 *daddr = *iaddr; \
341 } while (0)
342
343static void __init patch_kernel_fault_handler(void)
344{
345 unsigned long *iaddr, *daddr;
346
347 switch (num_segmaps) {
348 case 128:
349 /* Default, nothing to do. */
350 break;
351 case 256:
352 PATCH_INSN(invalid_segment_patch1_ff,
353 invalid_segment_patch1);
354 PATCH_INSN(invalid_segment_patch2_ff,
355 invalid_segment_patch2);
356 break;
357 case 512:
358 PATCH_INSN(invalid_segment_patch1_1ff,
359 invalid_segment_patch1);
360 PATCH_INSN(invalid_segment_patch2_1ff,
361 invalid_segment_patch2);
362 break;
363 default:
364 prom_printf("Unhandled number of segmaps: %d\n",
365 num_segmaps);
366 prom_halt();
367 }
368 switch (num_contexts) {
369 case 8:
370 /* Default, nothing to do. */
371 break;
372 case 16:
373 PATCH_INSN(num_context_patch1_16,
374 num_context_patch1);
375 break;
376 default:
377 prom_printf("Unhandled number of contexts: %d\n",
378 num_contexts);
379 prom_halt();
380 }
381
382 if (sun4c_vacinfo.do_hwflushes != 0) {
383 PATCH_INSN(vac_hwflush_patch1_on, vac_hwflush_patch1);
384 PATCH_INSN(vac_hwflush_patch2_on, vac_hwflush_patch2);
385 } else {
386 switch (sun4c_vacinfo.linesize) {
387 case 16:
388 /* Default, nothing to do. */
389 break;
390 case 32:
391 PATCH_INSN(vac_linesize_patch_32, vac_linesize_patch);
392 break;
393 default:
394 prom_printf("Impossible VAC linesize %d, halting...\n",
395 sun4c_vacinfo.linesize);
396 prom_halt();
397 }
398 }
399}
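/* Unlike PATCH_BRANCH() in srmmu.c, which synthesizes a branch on the
 * fly, PATCH_INSN() copies a prebuilt template word (e.g.
 * invalid_segment_patch1_ff) over the live instruction, so the
 * templates are presumably single, position-independent instructions.
 */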
400
401static void __init sun4c_probe_mmu(void)
402{
403 if ((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) ||
404 (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) {
405 /* Hardcode these just to be safe, PROM on SS1 does
406 * not have this info available in the root node.
407 */
408 num_segmaps = 128;
409 num_contexts = 8;
410 } else {
411 num_segmaps =
412 prom_getintdefault(prom_root_node, "mmu-npmg", 128);
413 num_contexts =
414 prom_getintdefault(prom_root_node, "mmu-nctx", 0x8);
415 }
416 patch_kernel_fault_handler();
417}
418
419volatile unsigned long __iomem *sun4c_memerr_reg = NULL;
420
421void __init sun4c_probe_memerr_reg(void)
422{
423 phandle node;
424 struct linux_prom_registers regs[1];
425
426 node = prom_getchild(prom_root_node);
427 node = prom_searchsiblings(prom_root_node, "memory-error");
428 if (!node)
429 return;
430 if (prom_getproperty(node, "reg", (char *)regs, sizeof(regs)) <= 0)
431 return;
432 /* hmm I think regs[0].which_io is zero here anyways */
433 sun4c_memerr_reg = ioremap(regs[0].phys_addr, regs[0].reg_size);
434}
435
436static inline void sun4c_init_ss2_cache_bug(void)
437{
438 if ((idprom->id_machtype == (SM_SUN4C | SM_4C_SS2)) ||
439 (idprom->id_machtype == (SM_SUN4C | SM_4C_IPX)) ||
440 (idprom->id_machtype == (SM_SUN4C | SM_4C_ELC))) {
441 /* Whee.. */
442 printk("SS2 cache bug detected, uncaching trap table page\n");
443 sun4c_flush_page((unsigned int) &_start);
444 sun4c_put_pte(((unsigned long) &_start),
445 (sun4c_get_pte((unsigned long) &_start) | _SUN4C_PAGE_NOCACHE));
446 }
447}
448
449/* Addr is always aligned on a page boundary for us already. */
450static int sun4c_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va,
451 unsigned long addr, int len)
452{
453 unsigned long page, end;
454
455 *pba = addr;
456
457 end = PAGE_ALIGN((addr + len));
458 while (addr < end) {
459 page = va;
460 sun4c_flush_page(page);
461 page -= PAGE_OFFSET;
462 page >>= PAGE_SHIFT;
463 page |= (_SUN4C_PAGE_VALID | _SUN4C_PAGE_DIRTY |
464 _SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_PRIV);
465 sun4c_put_pte(addr, page);
466 addr += PAGE_SIZE;
467 va += PAGE_SIZE;
468 }
469
470 return 0;
471}
472
473static void sun4c_unmap_dma_area(struct device *dev, unsigned long busa, int len)
474{
475 /* Fortunately for us, bus_addr == uncached_virt in sun4c. */
476 /* XXX Implement this */
477}
478
479/* TLB management. */
480
481/* Don't change this struct without changing entry.S. This is used
482 * in the in-window kernel fault handler, and you don't want to mess
483 * with that. (See sun4c_fault in entry.S).
484 */
485struct sun4c_mmu_entry {
486 struct sun4c_mmu_entry *next;
487 struct sun4c_mmu_entry *prev;
488 unsigned long vaddr;
489 unsigned char pseg;
490 unsigned char locked;
491
492 /* For user mappings only, and completely hidden from kernel
493 * TLB miss code.
494 */
495 unsigned char ctx;
496 struct sun4c_mmu_entry *lru_next;
497 struct sun4c_mmu_entry *lru_prev;
498};
499
500static struct sun4c_mmu_entry mmu_entry_pool[SUN4C_MAX_SEGMAPS];
501
502static void __init sun4c_init_mmu_entry_pool(void)
503{
504 int i;
505
506 for (i=0; i < SUN4C_MAX_SEGMAPS; i++) {
507 mmu_entry_pool[i].pseg = i;
508 mmu_entry_pool[i].next = NULL;
509 mmu_entry_pool[i].prev = NULL;
510 mmu_entry_pool[i].vaddr = 0;
511 mmu_entry_pool[i].locked = 0;
512 mmu_entry_pool[i].ctx = 0;
513 mmu_entry_pool[i].lru_next = NULL;
514 mmu_entry_pool[i].lru_prev = NULL;
515 }
516 mmu_entry_pool[invalid_segment].locked = 1;
517}
518
519static inline void fix_permissions(unsigned long vaddr, unsigned long bits_on,
520 unsigned long bits_off)
521{
522 unsigned long start, end;
523
524 end = vaddr + SUN4C_REAL_PGDIR_SIZE;
525 for (start = vaddr; start < end; start += PAGE_SIZE)
526 if (sun4c_get_pte(start) & _SUN4C_PAGE_VALID)
527 sun4c_put_pte(start, (sun4c_get_pte(start) | bits_on) &
528 ~bits_off);
529}
530
531static inline void sun4c_init_map_kernelprom(unsigned long kernel_end)
532{
533 unsigned long vaddr;
534 unsigned char pseg, ctx;
535
536 for (vaddr = KADB_DEBUGGER_BEGVM;
537 vaddr < LINUX_OPPROM_ENDVM;
538 vaddr += SUN4C_REAL_PGDIR_SIZE) {
539 pseg = sun4c_get_segmap(vaddr);
540 if (pseg != invalid_segment) {
541 mmu_entry_pool[pseg].locked = 1;
542 for (ctx = 0; ctx < num_contexts; ctx++)
543 prom_putsegment(ctx, vaddr, pseg);
544 fix_permissions(vaddr, _SUN4C_PAGE_PRIV, 0);
545 }
546 }
547
548 for (vaddr = KERNBASE; vaddr < kernel_end; vaddr += SUN4C_REAL_PGDIR_SIZE) {
549 pseg = sun4c_get_segmap(vaddr);
550 mmu_entry_pool[pseg].locked = 1;
551 for (ctx = 0; ctx < num_contexts; ctx++)
552 prom_putsegment(ctx, vaddr, pseg);
553 fix_permissions(vaddr, _SUN4C_PAGE_PRIV, _SUN4C_PAGE_NOCACHE);
554 }
555}
556
557static void __init sun4c_init_lock_area(unsigned long start, unsigned long end)
558{
559 int i, ctx;
560
561 while (start < end) {
562 for (i = 0; i < invalid_segment; i++)
563 if (!mmu_entry_pool[i].locked)
564 break;
565 mmu_entry_pool[i].locked = 1;
566 sun4c_init_clean_segmap(i);
567 for (ctx = 0; ctx < num_contexts; ctx++)
568 prom_putsegment(ctx, start, mmu_entry_pool[i].pseg);
569 start += SUN4C_REAL_PGDIR_SIZE;
570 }
571}
572
573/* Don't change this struct without changing entry.S. This is used
574 * in the in-window kernel fault handler, and you don't want to mess
575 * with that. (See sun4c_fault in entry.S).
576 */
577struct sun4c_mmu_ring {
578 struct sun4c_mmu_entry ringhd;
579 int num_entries;
580};
581
582static struct sun4c_mmu_ring sun4c_context_ring[SUN4C_MAX_CONTEXTS]; /* used user entries */
583static struct sun4c_mmu_ring sun4c_ufree_ring; /* free user entries */
584static struct sun4c_mmu_ring sun4c_ulru_ring; /* LRU user entries */
585struct sun4c_mmu_ring sun4c_kernel_ring; /* used kernel entries */
586struct sun4c_mmu_ring sun4c_kfree_ring; /* free kernel entries */
587
588static inline void sun4c_init_rings(void)
589{
590 int i;
591
592 for (i = 0; i < SUN4C_MAX_CONTEXTS; i++) {
593 sun4c_context_ring[i].ringhd.next =
594 sun4c_context_ring[i].ringhd.prev =
595 &sun4c_context_ring[i].ringhd;
596 sun4c_context_ring[i].num_entries = 0;
597 }
598 sun4c_ufree_ring.ringhd.next = sun4c_ufree_ring.ringhd.prev =
599 &sun4c_ufree_ring.ringhd;
600 sun4c_ufree_ring.num_entries = 0;
601 sun4c_ulru_ring.ringhd.lru_next = sun4c_ulru_ring.ringhd.lru_prev =
602 &sun4c_ulru_ring.ringhd;
603 sun4c_ulru_ring.num_entries = 0;
604 sun4c_kernel_ring.ringhd.next = sun4c_kernel_ring.ringhd.prev =
605 &sun4c_kernel_ring.ringhd;
606 sun4c_kernel_ring.num_entries = 0;
607 sun4c_kfree_ring.ringhd.next = sun4c_kfree_ring.ringhd.prev =
608 &sun4c_kfree_ring.ringhd;
609 sun4c_kfree_ring.num_entries = 0;
610}
611
612static void add_ring(struct sun4c_mmu_ring *ring,
613 struct sun4c_mmu_entry *entry)
614{
615 struct sun4c_mmu_entry *head = &ring->ringhd;
616
617 entry->prev = head;
618 (entry->next = head->next)->prev = entry;
619 head->next = entry;
620 ring->num_entries++;
621}
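/* The combined statement in add_ring() unrolls to the usual
 * sentinel-based circular-list head insertion:
 *
 *	entry->prev = head;
 *	entry->next = head->next;
 *	head->next->prev = entry;	(old first node points back at entry)
 *	head->next = entry;		(entry becomes the new first node)
 */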
622
623static inline void add_lru(struct sun4c_mmu_entry *entry)
624{
625 struct sun4c_mmu_ring *ring = &sun4c_ulru_ring;
626 struct sun4c_mmu_entry *head = &ring->ringhd;
627
628 entry->lru_next = head;
629 (entry->lru_prev = head->lru_prev)->lru_next = entry;
630 head->lru_prev = entry;
631}
632
633static void add_ring_ordered(struct sun4c_mmu_ring *ring,
634 struct sun4c_mmu_entry *entry)
635{
636 struct sun4c_mmu_entry *head = &ring->ringhd;
637 unsigned long addr = entry->vaddr;
638
639 while ((head->next != &ring->ringhd) && (head->next->vaddr < addr))
640 head = head->next;
641
642 entry->prev = head;
643 (entry->next = head->next)->prev = entry;
644 head->next = entry;
645 ring->num_entries++;
646
647 add_lru(entry);
648}
649
650static inline void remove_ring(struct sun4c_mmu_ring *ring,
651 struct sun4c_mmu_entry *entry)
652{
653 struct sun4c_mmu_entry *next = entry->next;
654
655 (next->prev = entry->prev)->next = next;
656 ring->num_entries--;
657}
658
659static void remove_lru(struct sun4c_mmu_entry *entry)
660{
661 struct sun4c_mmu_entry *next = entry->lru_next;
662
663 (next->lru_prev = entry->lru_prev)->lru_next = next;
664}
665
666static void free_user_entry(int ctx, struct sun4c_mmu_entry *entry)
667{
668 remove_ring(sun4c_context_ring+ctx, entry);
669 remove_lru(entry);
670 add_ring(&sun4c_ufree_ring, entry);
671}
672
673static void free_kernel_entry(struct sun4c_mmu_entry *entry,
674 struct sun4c_mmu_ring *ring)
675{
676 remove_ring(ring, entry);
677 add_ring(&sun4c_kfree_ring, entry);
678}
679
680static void __init sun4c_init_fill_kernel_ring(int howmany)
681{
682 int i;
683
684 while (howmany) {
685 for (i = 0; i < invalid_segment; i++)
686 if (!mmu_entry_pool[i].locked)
687 break;
688 mmu_entry_pool[i].locked = 1;
689 sun4c_init_clean_segmap(i);
690 add_ring(&sun4c_kfree_ring, &mmu_entry_pool[i]);
691 howmany--;
692 }
693}
694
695static void __init sun4c_init_fill_user_ring(void)
696{
697 int i;
698
699 for (i = 0; i < invalid_segment; i++) {
700 if (mmu_entry_pool[i].locked)
701 continue;
702 sun4c_init_clean_segmap(i);
703 add_ring(&sun4c_ufree_ring, &mmu_entry_pool[i]);
704 }
705}
706
707static void sun4c_kernel_unmap(struct sun4c_mmu_entry *kentry)
708{
709 int savectx, ctx;
710
711 savectx = sun4c_get_context();
712 for (ctx = 0; ctx < num_contexts; ctx++) {
713 sun4c_set_context(ctx);
714 sun4c_put_segmap(kentry->vaddr, invalid_segment);
715 }
716 sun4c_set_context(savectx);
717}
718
719static void sun4c_kernel_map(struct sun4c_mmu_entry *kentry)
720{
721 int savectx, ctx;
722
723 savectx = sun4c_get_context();
724 for (ctx = 0; ctx < num_contexts; ctx++) {
725 sun4c_set_context(ctx);
726 sun4c_put_segmap(kentry->vaddr, kentry->pseg);
727 }
728 sun4c_set_context(savectx);
729}
730
731#define sun4c_user_unmap(__entry) \
732 sun4c_put_segmap((__entry)->vaddr, invalid_segment)
733
734static void sun4c_demap_context(struct sun4c_mmu_ring *crp, unsigned char ctx)
735{
736 struct sun4c_mmu_entry *head = &crp->ringhd;
737 unsigned long flags;
738
739 local_irq_save(flags);
740 if (head->next != head) {
741 struct sun4c_mmu_entry *entry = head->next;
742 int savectx = sun4c_get_context();
743
744 flush_user_windows();
745 sun4c_set_context(ctx);
746 sun4c_flush_context();
747 do {
748 struct sun4c_mmu_entry *next = entry->next;
749
750 sun4c_user_unmap(entry);
751 free_user_entry(ctx, entry);
752
753 entry = next;
754 } while (entry != head);
755 sun4c_set_context(savectx);
756 }
757 local_irq_restore(flags);
758}
759
760static int sun4c_user_taken_entries; /* This is how many we have. */
761static int max_user_taken_entries; /* This limits us and prevents deadlock. */
762
763static struct sun4c_mmu_entry *sun4c_kernel_strategy(void)
764{
765 struct sun4c_mmu_entry *this_entry;
766
767 /* If some are free, return first one. */
768 if (sun4c_kfree_ring.num_entries) {
769 this_entry = sun4c_kfree_ring.ringhd.next;
770 return this_entry;
771 }
772
773 /* Else free one up. */
774 this_entry = sun4c_kernel_ring.ringhd.prev;
775 sun4c_flush_segment(this_entry->vaddr);
776 sun4c_kernel_unmap(this_entry);
777 free_kernel_entry(this_entry, &sun4c_kernel_ring);
778 this_entry = sun4c_kfree_ring.ringhd.next;
779
780 return this_entry;
781}
782
783/* Using this method to free up mmu entries eliminates a lot of
784 * potential races since we have a kernel that incurs tlb
785 * replacement faults. There may be performance penalties.
786 *
787 * NOTE: Must be called with interrupts disabled.
788 */
789static struct sun4c_mmu_entry *sun4c_user_strategy(void)
790{
791 struct sun4c_mmu_entry *entry;
792 unsigned char ctx;
793 int savectx;
794
795 /* If some are free, return first one. */
796 if (sun4c_ufree_ring.num_entries) {
797 entry = sun4c_ufree_ring.ringhd.next;
798 goto unlink_out;
799 }
800
801 if (sun4c_user_taken_entries) {
802 entry = sun4c_kernel_strategy();
803 sun4c_user_taken_entries--;
804 goto kunlink_out;
805 }
806
807 /* Grab from the beginning of the LRU list. */
808 entry = sun4c_ulru_ring.ringhd.lru_next;
809 ctx = entry->ctx;
810
811 savectx = sun4c_get_context();
812 flush_user_windows();
813 sun4c_set_context(ctx);
814 sun4c_flush_segment(entry->vaddr);
815 sun4c_user_unmap(entry);
816 remove_ring(sun4c_context_ring + ctx, entry);
817 remove_lru(entry);
818 sun4c_set_context(savectx);
819
820 return entry;
821
822unlink_out:
823 remove_ring(&sun4c_ufree_ring, entry);
824 return entry;
825kunlink_out:
826 remove_ring(&sun4c_kfree_ring, entry);
827 return entry;
828}
829
830/* NOTE: Must be called with interrupts disabled. */
831void sun4c_grow_kernel_ring(void)
832{
833 struct sun4c_mmu_entry *entry;
834
835 /* Prevent deadlock condition. */
836 if (sun4c_user_taken_entries >= max_user_taken_entries)
837 return;
838
839 if (sun4c_ufree_ring.num_entries) {
840 entry = sun4c_ufree_ring.ringhd.next;
841 remove_ring(&sun4c_ufree_ring, entry);
842 add_ring(&sun4c_kfree_ring, entry);
843 sun4c_user_taken_entries++;
844 }
845}
846
847/* 2 page buckets for task struct and kernel stack allocation.
848 *
849 * TASK_STACK_BEGIN
850 * bucket[0]
851 * bucket[1]
852 * [ ... ]
853 * bucket[NR_TASK_BUCKETS-1]
854 * TASK_STACK_BEGIN + (sizeof(struct task_bucket) * NR_TASK_BUCKETS)
855 *
856 * Each slot looks like:
857 *
858 * page 1 -- task struct + beginning of kernel stack
859 * page 2 -- rest of kernel stack
860 */
861
862union task_union *sun4c_bucket[NR_TASK_BUCKETS];
863
864static int sun4c_lowbucket_avail;
865
866#define BUCKET_EMPTY ((union task_union *) 0)
867#define BUCKET_SHIFT (PAGE_SHIFT + 1) /* log2(sizeof(struct task_bucket)) */
868#define BUCKET_SIZE (1 << BUCKET_SHIFT)
869#define BUCKET_NUM(addr) ((((addr) - SUN4C_LOCK_VADDR) >> BUCKET_SHIFT))
870#define BUCKET_ADDR(num) (((num) << BUCKET_SHIFT) + SUN4C_LOCK_VADDR)
871#define BUCKET_PTE(page) \
872 ((((page) - PAGE_OFFSET) >> PAGE_SHIFT) | pgprot_val(SUN4C_PAGE_KERNEL))
873#define BUCKET_PTE_PAGE(pte) \
874 (PAGE_OFFSET + (((pte) & SUN4C_PFN_MASK) << PAGE_SHIFT))
875
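/* Worked example, assuming the usual sparc32 PAGE_SHIFT of 12: each
 * bucket is BUCKET_SIZE = 1 << 13 = 8K, i.e. the two pages described
 * above, so bucket n sits at SUN4C_LOCK_VADDR + n * 8192 and
 * BUCKET_NUM() simply inverts that shift.
 */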
876static void get_locked_segment(unsigned long addr)
877{
878 struct sun4c_mmu_entry *stolen;
879 unsigned long flags;
880
881 local_irq_save(flags);
882 addr &= SUN4C_REAL_PGDIR_MASK;
883 stolen = sun4c_user_strategy();
884 max_user_taken_entries--;
885 stolen->vaddr = addr;
886 flush_user_windows();
887 sun4c_kernel_map(stolen);
888 local_irq_restore(flags);
889}
890
891static void free_locked_segment(unsigned long addr)
892{
893 struct sun4c_mmu_entry *entry;
894 unsigned long flags;
895 unsigned char pseg;
896
897 local_irq_save(flags);
898 addr &= SUN4C_REAL_PGDIR_MASK;
899 pseg = sun4c_get_segmap(addr);
900 entry = &mmu_entry_pool[pseg];
901
902 flush_user_windows();
903 sun4c_flush_segment(addr);
904 sun4c_kernel_unmap(entry);
905 add_ring(&sun4c_ufree_ring, entry);
906 max_user_taken_entries++;
907 local_irq_restore(flags);
908}
909
910static inline void garbage_collect(int entry)
911{
912 int start, end;
913
914 /* 32 buckets per segment... */
915 entry &= ~31;
916 start = entry;
917 for (end = (start + 32); start < end; start++)
918 if (sun4c_bucket[start] != BUCKET_EMPTY)
919 return;
920
921 /* Entire segment empty, release it. */
922 free_locked_segment(BUCKET_ADDR(entry));
923}
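/* The "entry &= ~31" above rounds down to the first bucket of the
 * enclosing segment: 32 buckets of 8K each cover 256K, one
 * SUN4C_REAL_PGDIR_SIZE worth of address space (the 8K bucket size
 * assumes the 4K pages noted earlier).
 */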
924
925static struct thread_info *sun4c_alloc_thread_info_node(int node)
926{
927 unsigned long addr, pages;
928 int entry;
929
930 pages = __get_free_pages(GFP_KERNEL, THREAD_INFO_ORDER);
931 if (!pages)
932 return NULL;
933
934 for (entry = sun4c_lowbucket_avail; entry < NR_TASK_BUCKETS; entry++)
935 if (sun4c_bucket[entry] == BUCKET_EMPTY)
936 break;
937 if (entry == NR_TASK_BUCKETS) {
938 free_pages(pages, THREAD_INFO_ORDER);
939 return NULL;
940 }
941 if (entry >= sun4c_lowbucket_avail)
942 sun4c_lowbucket_avail = entry + 1;
943
944 addr = BUCKET_ADDR(entry);
945 sun4c_bucket[entry] = (union task_union *) addr;
946 if (sun4c_get_segmap(addr) == invalid_segment)
947 get_locked_segment(addr);
948
949 /* We are changing the virtual color of the page(s)
950 * so we must flush the cache to guarantee consistency.
951 */
952 sun4c_flush_page(pages);
953 sun4c_flush_page(pages + PAGE_SIZE);
954
955 sun4c_put_pte(addr, BUCKET_PTE(pages));
956 sun4c_put_pte(addr + PAGE_SIZE, BUCKET_PTE(pages + PAGE_SIZE));
957
958#ifdef CONFIG_DEBUG_STACK_USAGE
959 memset((void *)addr, 0, PAGE_SIZE << THREAD_INFO_ORDER);
960#endif /* DEBUG_STACK_USAGE */
961
962 return (struct thread_info *) addr;
963}
964
965static void sun4c_free_thread_info(struct thread_info *ti)
966{
967 unsigned long tiaddr = (unsigned long) ti;
968 unsigned long pages = BUCKET_PTE_PAGE(sun4c_get_pte(tiaddr));
969 int entry = BUCKET_NUM(tiaddr);
970
971 /* We are deleting a mapping, so the flush here is mandatory. */
972 sun4c_flush_page(tiaddr);
973 sun4c_flush_page(tiaddr + PAGE_SIZE);
974
975 sun4c_put_pte(tiaddr, 0);
976 sun4c_put_pte(tiaddr + PAGE_SIZE, 0);
977
978 sun4c_bucket[entry] = BUCKET_EMPTY;
979 if (entry < sun4c_lowbucket_avail)
980 sun4c_lowbucket_avail = entry;
981
982 free_pages(pages, THREAD_INFO_ORDER);
983 garbage_collect(entry);
984}
985
986static void __init sun4c_init_buckets(void)
987{
988 int entry;
989
990 if (sizeof(union thread_union) != (PAGE_SIZE << THREAD_INFO_ORDER)) {
991 extern void thread_info_size_is_bolixed_pete(void);
992 thread_info_size_is_bolixed_pete();
993 }
994
995 for (entry = 0; entry < NR_TASK_BUCKETS; entry++)
996 sun4c_bucket[entry] = BUCKET_EMPTY;
997 sun4c_lowbucket_avail = 0;
998}
999
1000static unsigned long sun4c_iobuffer_start;
1001static unsigned long sun4c_iobuffer_end;
1002static unsigned long sun4c_iobuffer_high;
1003static unsigned long *sun4c_iobuffer_map;
1004static int iobuffer_map_size;
1005
1006/*
1007 * Alias our pages so they do not cause a trap.
1008 * Also, one page may be aliased into several I/O areas, and we may
1009 * finish these I/Os separately.
1010 */
1011static char *sun4c_lockarea(char *vaddr, unsigned long size)
1012{
1013 unsigned long base, scan;
1014 unsigned long npages;
1015 unsigned long vpage;
1016 unsigned long pte;
1017 unsigned long apage;
1018 unsigned long high;
1019 unsigned long flags;
1020
1021 npages = (((unsigned long)vaddr & ~PAGE_MASK) +
1022 size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
1023
1024 local_irq_save(flags);
1025 base = bitmap_find_next_zero_area(sun4c_iobuffer_map, iobuffer_map_size,
1026 0, npages, 0);
1027 if (base >= iobuffer_map_size)
1028 goto abend;
1029
1030 high = ((base + npages) << PAGE_SHIFT) + sun4c_iobuffer_start;
1031 high = SUN4C_REAL_PGDIR_ALIGN(high);
1032 while (high > sun4c_iobuffer_high) {
1033 get_locked_segment(sun4c_iobuffer_high);
1034 sun4c_iobuffer_high += SUN4C_REAL_PGDIR_SIZE;
1035 }
1036
1037 vpage = ((unsigned long) vaddr) & PAGE_MASK;
1038 for (scan = base; scan < base+npages; scan++) {
1039 pte = ((vpage-PAGE_OFFSET) >> PAGE_SHIFT);
1040 pte |= pgprot_val(SUN4C_PAGE_KERNEL);
1041 pte |= _SUN4C_PAGE_NOCACHE;
1042 set_bit(scan, sun4c_iobuffer_map);
1043 apage = (scan << PAGE_SHIFT) + sun4c_iobuffer_start;
1044
1045 /* Flush original mapping so we see the right things later. */
1046 sun4c_flush_page(vpage);
1047
1048 sun4c_put_pte(apage, pte);
1049 vpage += PAGE_SIZE;
1050 }
1051 local_irq_restore(flags);
1052 return (char *) ((base << PAGE_SHIFT) + sun4c_iobuffer_start +
1053 (((unsigned long) vaddr) & ~PAGE_MASK));
1054
1055abend:
1056 local_irq_restore(flags);
1057 printk("DMA vaddr=0x%p size=%08lx\n", vaddr, size);
1058 panic("Out of iobuffer table");
1059 return NULL;
1060}
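/* Worked example of the npages rounding above: locking 0x1000 bytes
 * that begin 0x100 into a page gives
 * (0x100 + 0x1000 + 0xfff) >> PAGE_SHIFT == 2, so two alias PTEs are
 * installed in the iobuffer window.
 */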
1061
1062static void sun4c_unlockarea(char *vaddr, unsigned long size)
1063{
1064 unsigned long vpage, npages;
1065 unsigned long flags;
1066 int scan, high;
1067
1068 vpage = (unsigned long)vaddr & PAGE_MASK;
1069 npages = (((unsigned long)vaddr & ~PAGE_MASK) +
1070 size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
1071
1072 local_irq_save(flags);
1073 while (npages != 0) {
1074 --npages;
1075
1076 /* This mapping is marked non-cacheable, no flush necessary. */
1077 sun4c_put_pte(vpage, 0);
1078 clear_bit((vpage - sun4c_iobuffer_start) >> PAGE_SHIFT,
1079 sun4c_iobuffer_map);
1080 vpage += PAGE_SIZE;
1081 }
1082
1083 /* garbage collect */
1084 scan = (sun4c_iobuffer_high - sun4c_iobuffer_start) >> PAGE_SHIFT;
1085 while (scan >= 0 && !sun4c_iobuffer_map[scan >> 5])
1086 scan -= 32;
1087 scan += 32;
1088 high = sun4c_iobuffer_start + (scan << PAGE_SHIFT);
1089 high = SUN4C_REAL_PGDIR_ALIGN(high) + SUN4C_REAL_PGDIR_SIZE;
1090 while (high < sun4c_iobuffer_high) {
1091 sun4c_iobuffer_high -= SUN4C_REAL_PGDIR_SIZE;
1092 free_locked_segment(sun4c_iobuffer_high);
1093 }
1094 local_irq_restore(flags);
1095}
1096
1097/* Note that the scsi code at init time passes buffers to us here
1098 * which sit on the kernel stack; those are already locked
1099 * by implication and would fool the page locking code above
1100 * if passed in by mistake.
1101 */
1102static __u32 sun4c_get_scsi_one(struct device *dev, char *bufptr, unsigned long len)
1103{
1104 unsigned long page;
1105
1106 page = ((unsigned long)bufptr) & PAGE_MASK;
1107 if (!virt_addr_valid(page)) {
1108 sun4c_flush_page(page);
1109 return (__u32)bufptr; /* already locked */
1110 }
1111 return (__u32)sun4c_lockarea(bufptr, len);
1112}
1113
1114static void sun4c_get_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
1115{
1116 while (sz != 0) {
1117 --sz;
1118 sg->dma_address = (__u32)sun4c_lockarea(sg_virt(sg), sg->length);
1119 sg->dma_length = sg->length;
1120 sg = sg_next(sg);
1121 }
1122}
1123
1124static void sun4c_release_scsi_one(struct device *dev, __u32 bufptr, unsigned long len)
1125{
1126 if (bufptr < sun4c_iobuffer_start)
1127 return; /* On kernel stack or similar, see above */
1128 sun4c_unlockarea((char *)bufptr, len);
1129}
1130
1131static void sun4c_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
1132{
1133 while (sz != 0) {
1134 --sz;
1135 sun4c_unlockarea((char *)sg->dma_address, sg->length);
1136 sg = sg_next(sg);
1137 }
1138}
1139
1140#define TASK_ENTRY_SIZE BUCKET_SIZE /* see above */
1141#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
1142
1143struct vm_area_struct sun4c_kstack_vma;
1144
1145static void __init sun4c_init_lock_areas(void)
1146{
1147 unsigned long sun4c_taskstack_start;
1148 unsigned long sun4c_taskstack_end;
1149 int bitmap_size;
1150
1151 sun4c_init_buckets();
1152 sun4c_taskstack_start = SUN4C_LOCK_VADDR;
1153 sun4c_taskstack_end = (sun4c_taskstack_start +
1154 (TASK_ENTRY_SIZE * NR_TASK_BUCKETS));
1155 if (sun4c_taskstack_end >= SUN4C_LOCK_END) {
1156 prom_printf("Too many tasks, decrease NR_TASK_BUCKETS please.\n");
1157 prom_halt();
1158 }
1159
1160 sun4c_iobuffer_start = sun4c_iobuffer_high =
1161 SUN4C_REAL_PGDIR_ALIGN(sun4c_taskstack_end);
1162 sun4c_iobuffer_end = SUN4C_LOCK_END;
1163 bitmap_size = (sun4c_iobuffer_end - sun4c_iobuffer_start) >> PAGE_SHIFT;
1164 bitmap_size = (bitmap_size + 7) >> 3;
1165 bitmap_size = LONG_ALIGN(bitmap_size);
1166 iobuffer_map_size = bitmap_size << 3;
1167 sun4c_iobuffer_map = __alloc_bootmem(bitmap_size, SMP_CACHE_BYTES, 0UL);
1168 memset((void *) sun4c_iobuffer_map, 0, bitmap_size);
1169
1170 sun4c_kstack_vma.vm_mm = &init_mm;
1171 sun4c_kstack_vma.vm_start = sun4c_taskstack_start;
1172 sun4c_kstack_vma.vm_end = sun4c_taskstack_end;
1173 sun4c_kstack_vma.vm_page_prot = PAGE_SHARED;
1174 sun4c_kstack_vma.vm_flags = VM_READ | VM_WRITE | VM_EXEC;
1175 insert_vm_struct(&init_mm, &sun4c_kstack_vma);
1176}
1177
1178/* Cache flushing on the sun4c. */
1179static void sun4c_flush_cache_all(void)
1180{
1181 unsigned long begin, end;
1182
1183 flush_user_windows();
1184 begin = (KERNBASE + SUN4C_REAL_PGDIR_SIZE);
1185 end = (begin + SUN4C_VAC_SIZE);
1186
1187 if (sun4c_vacinfo.linesize == 32) {
1188 while (begin < end) {
1189 __asm__ __volatile__(
1190 "ld [%0 + 0x00], %%g0\n\t"
1191 "ld [%0 + 0x20], %%g0\n\t"
1192 "ld [%0 + 0x40], %%g0\n\t"
1193 "ld [%0 + 0x60], %%g0\n\t"
1194 "ld [%0 + 0x80], %%g0\n\t"
1195 "ld [%0 + 0xa0], %%g0\n\t"
1196 "ld [%0 + 0xc0], %%g0\n\t"
1197 "ld [%0 + 0xe0], %%g0\n\t"
1198 "ld [%0 + 0x100], %%g0\n\t"
1199 "ld [%0 + 0x120], %%g0\n\t"
1200 "ld [%0 + 0x140], %%g0\n\t"
1201 "ld [%0 + 0x160], %%g0\n\t"
1202 "ld [%0 + 0x180], %%g0\n\t"
1203 "ld [%0 + 0x1a0], %%g0\n\t"
1204 "ld [%0 + 0x1c0], %%g0\n\t"
1205 "ld [%0 + 0x1e0], %%g0\n"
1206 : : "r" (begin));
1207 begin += 512;
1208 }
1209 } else {
1210 while (begin < end) {
1211 __asm__ __volatile__(
1212 "ld [%0 + 0x00], %%g0\n\t"
1213 "ld [%0 + 0x10], %%g0\n\t"
1214 "ld [%0 + 0x20], %%g0\n\t"
1215 "ld [%0 + 0x30], %%g0\n\t"
1216 "ld [%0 + 0x40], %%g0\n\t"
1217 "ld [%0 + 0x50], %%g0\n\t"
1218 "ld [%0 + 0x60], %%g0\n\t"
1219 "ld [%0 + 0x70], %%g0\n\t"
1220 "ld [%0 + 0x80], %%g0\n\t"
1221 "ld [%0 + 0x90], %%g0\n\t"
1222 "ld [%0 + 0xa0], %%g0\n\t"
1223 "ld [%0 + 0xb0], %%g0\n\t"
1224 "ld [%0 + 0xc0], %%g0\n\t"
1225 "ld [%0 + 0xd0], %%g0\n\t"
1226 "ld [%0 + 0xe0], %%g0\n\t"
1227 "ld [%0 + 0xf0], %%g0\n"
1228 : : "r" (begin));
1229 begin += 256;
1230 }
1231 }
1232}
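/* The unrolled ld loops above touch one word per cache line across a
 * VAC-sized window (16 lines per asm block, hence the 512- and 256-byte
 * strides for 32- and 16-byte lines respectively), displacing every
 * line in the cache.
 */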
1233
1234static void sun4c_flush_cache_mm(struct mm_struct *mm)
1235{
1236 int new_ctx = mm->context;
1237
1238 if (new_ctx != NO_CONTEXT) {
1239 flush_user_windows();
1240
1241 if (sun4c_context_ring[new_ctx].num_entries) {
1242 struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
1243 unsigned long flags;
1244
1245 local_irq_save(flags);
1246 if (head->next != head) {
1247 struct sun4c_mmu_entry *entry = head->next;
1248 int savectx = sun4c_get_context();
1249
1250 sun4c_set_context(new_ctx);
1251 sun4c_flush_context();
1252 do {
1253 struct sun4c_mmu_entry *next = entry->next;
1254
1255 sun4c_user_unmap(entry);
1256 free_user_entry(new_ctx, entry);
1257
1258 entry = next;
1259 } while (entry != head);
1260 sun4c_set_context(savectx);
1261 }
1262 local_irq_restore(flags);
1263 }
1264 }
1265}
1266
1267static void sun4c_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1268{
1269 struct mm_struct *mm = vma->vm_mm;
1270 int new_ctx = mm->context;
1271
1272 if (new_ctx != NO_CONTEXT) {
1273 struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
1274 struct sun4c_mmu_entry *entry;
1275 unsigned long flags;
1276
1277 flush_user_windows();
1278
1279 local_irq_save(flags);
1280 /* All user segmap chains are ordered on entry->vaddr. */
1281 for (entry = head->next;
1282 (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
1283 entry = entry->next)
1284 ;
1285
1286 /* Tracing various job mixtures showed that this conditional
1287 * only passes ~35% of the time for most worst-case situations,
1288 * therefore we avoid all of this gross overhead ~65% of the time.
1289 */
1290 if ((entry != head) && (entry->vaddr < end)) {
1291 int octx = sun4c_get_context();
1292 sun4c_set_context(new_ctx);
1293
1294 /* At this point we always have (start >= entry->vaddr) and
1295 * (entry->vaddr < end); once the latter condition
1296 * ceases to hold, or we hit the end of the list, we
1297 * exit the loop. The ordering of all user allocated
1298 * segmaps makes this all work out so beautifully.
1299 */
1300 do {
1301 struct sun4c_mmu_entry *next = entry->next;
1302 unsigned long realend;
1303
1304 /* "realstart" is always >= entry->vaddr */
1305 realend = entry->vaddr + SUN4C_REAL_PGDIR_SIZE;
1306 if (end < realend)
1307 realend = end;
1308 if ((realend - entry->vaddr) <= (PAGE_SIZE << 3)) {
1309 unsigned long page = entry->vaddr;
1310 while (page < realend) {
1311 sun4c_flush_page(page);
1312 page += PAGE_SIZE;
1313 }
1314 } else {
1315 sun4c_flush_segment(entry->vaddr);
1316 sun4c_user_unmap(entry);
1317 free_user_entry(new_ctx, entry);
1318 }
1319 entry = next;
1320 } while ((entry != head) && (entry->vaddr < end));
1321 sun4c_set_context(octx);
1322 }
1323 local_irq_restore(flags);
1324 }
1325}
1326
1327static void sun4c_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
1328{
1329 struct mm_struct *mm = vma->vm_mm;
1330 int new_ctx = mm->context;
1331
1332 /* Sun4c has no separate I/D caches, so we cannot optimize for
1333 * non-text page flushes.
1334 */
1335 if (new_ctx != NO_CONTEXT) {
1336 int octx = sun4c_get_context();
1337 unsigned long flags;
1338
1339 flush_user_windows();
1340 local_irq_save(flags);
1341 sun4c_set_context(new_ctx);
1342 sun4c_flush_page(page);
1343 sun4c_set_context(octx);
1344 local_irq_restore(flags);
1345 }
1346}
1347
1348static void sun4c_flush_page_to_ram(unsigned long page)
1349{
1350 unsigned long flags;
1351
1352 local_irq_save(flags);
1353 sun4c_flush_page(page);
1354 local_irq_restore(flags);
1355}
1356
1357/* Sun4c cache is unified, both instructions and data live there, so
1358 * no need to flush the on-stack instructions for new signal handlers.
1359 */
1360static void sun4c_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
1361{
1362}
1363
1364/* TLB flushing on the sun4c. These routines count on the cache
1365 * flushing code to flush the user register windows so that we need
1366 * not do so when we get here.
1367 */
1368
1369static void sun4c_flush_tlb_all(void)
1370{
1371 struct sun4c_mmu_entry *this_entry, *next_entry;
1372 unsigned long flags;
1373 int savectx, ctx;
1374
1375 local_irq_save(flags);
1376 this_entry = sun4c_kernel_ring.ringhd.next;
1377 savectx = sun4c_get_context();
1378 flush_user_windows();
1379 while (sun4c_kernel_ring.num_entries) {
1380 next_entry = this_entry->next;
1381 sun4c_flush_segment(this_entry->vaddr);
1382 for (ctx = 0; ctx < num_contexts; ctx++) {
1383 sun4c_set_context(ctx);
1384 sun4c_put_segmap(this_entry->vaddr, invalid_segment);
1385 }
1386 free_kernel_entry(this_entry, &sun4c_kernel_ring);
1387 this_entry = next_entry;
1388 }
1389 sun4c_set_context(savectx);
1390 local_irq_restore(flags);
1391}
1392
1393static void sun4c_flush_tlb_mm(struct mm_struct *mm)
1394{
1395 int new_ctx = mm->context;
1396
1397 if (new_ctx != NO_CONTEXT) {
1398 struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
1399 unsigned long flags;
1400
1401 local_irq_save(flags);
1402 if (head->next != head) {
1403 struct sun4c_mmu_entry *entry = head->next;
1404 int savectx = sun4c_get_context();
1405
1406 sun4c_set_context(new_ctx);
1407 sun4c_flush_context();
1408 do {
1409 struct sun4c_mmu_entry *next = entry->next;
1410
1411 sun4c_user_unmap(entry);
1412 free_user_entry(new_ctx, entry);
1413
1414 entry = next;
1415 } while (entry != head);
1416 sun4c_set_context(savectx);
1417 }
1418 local_irq_restore(flags);
1419 }
1420}
1421
1422static void sun4c_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1423{
1424 struct mm_struct *mm = vma->vm_mm;
1425 int new_ctx = mm->context;
1426
1427 if (new_ctx != NO_CONTEXT) {
1428 struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
1429 struct sun4c_mmu_entry *entry;
1430 unsigned long flags;
1431
1432 local_irq_save(flags);
1433 /* See commentary in sun4c_flush_cache_range(). */
1434 for (entry = head->next;
1435 (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
1436 entry = entry->next)
1437 ;
1438
1439 if ((entry != head) && (entry->vaddr < end)) {
1440 int octx = sun4c_get_context();
1441
1442 sun4c_set_context(new_ctx);
1443 do {
1444 struct sun4c_mmu_entry *next = entry->next;
1445
1446 sun4c_flush_segment(entry->vaddr);
1447 sun4c_user_unmap(entry);
1448 free_user_entry(new_ctx, entry);
1449
1450 entry = next;
1451 } while ((entry != head) && (entry->vaddr < end));
1452 sun4c_set_context(octx);
1453 }
1454 local_irq_restore(flags);
1455 }
1456}
1457
1458static void sun4c_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
1459{
1460 struct mm_struct *mm = vma->vm_mm;
1461 int new_ctx = mm->context;
1462
1463 if (new_ctx != NO_CONTEXT) {
1464 int savectx = sun4c_get_context();
1465 unsigned long flags;
1466
1467 local_irq_save(flags);
1468 sun4c_set_context(new_ctx);
1469 page &= PAGE_MASK;
1470 sun4c_flush_page(page);
1471 sun4c_put_pte(page, 0);
1472 sun4c_set_context(savectx);
1473 local_irq_restore(flags);
1474 }
1475}
1476
1477static inline void sun4c_mapioaddr(unsigned long physaddr, unsigned long virt_addr)
1478{
1479 unsigned long page_entry, pg_iobits;
1480
1481 pg_iobits = _SUN4C_PAGE_PRESENT | _SUN4C_READABLE | _SUN4C_WRITEABLE |
1482 _SUN4C_PAGE_IO | _SUN4C_PAGE_NOCACHE;
1483
1484 page_entry = ((physaddr >> PAGE_SHIFT) & SUN4C_PFN_MASK);
1485 page_entry |= ((pg_iobits | _SUN4C_PAGE_PRIV) & ~(_SUN4C_PAGE_PRESENT));
1486 sun4c_put_pte(virt_addr, page_entry);
1487}
1488
1489static void sun4c_mapiorange(unsigned int bus, unsigned long xpa,
1490 unsigned long xva, unsigned int len)
1491{
1492 while (len != 0) {
1493 len -= PAGE_SIZE;
1494 sun4c_mapioaddr(xpa, xva);
1495 xva += PAGE_SIZE;
1496 xpa += PAGE_SIZE;
1497 }
1498}
1499
1500static void sun4c_unmapiorange(unsigned long virt_addr, unsigned int len)
1501{
1502 while (len != 0) {
1503 len -= PAGE_SIZE;
1504 sun4c_put_pte(virt_addr, 0);
1505 virt_addr += PAGE_SIZE;
1506 }
1507}
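
Both helpers work strictly in whole pages and assume len is a multiple of
PAGE_SIZE, since the loops simply subtract PAGE_SIZE until len reaches zero.
A usage sketch with invented addresses (the bus argument is unused by this
sun4c implementation):

	/* Illustration only: map a three-page device register window at an
	 * invented physical/virtual address pair, then tear it down again.
	 */
	static void example_map_regs(void)
	{
		sun4c_mapiorange(0, 0xf3000000UL, 0xfe000000UL, 3 * PAGE_SIZE);
		/* ... access the device registers through 0xfe000000 ... */
		sun4c_unmapiorange(0xfe000000UL, 3 * PAGE_SIZE);
	}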
1508
1509static void sun4c_alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
1510{
1511 struct ctx_list *ctxp;
1512
1513 ctxp = ctx_free.next;
1514 if (ctxp != &ctx_free) {
1515 remove_from_ctx_list(ctxp);
1516 add_to_used_ctxlist(ctxp);
1517 mm->context = ctxp->ctx_number;
1518 ctxp->ctx_mm = mm;
1519 return;
1520 }
1521 ctxp = ctx_used.next;
1522 if (ctxp->ctx_mm == old_mm)
1523 ctxp = ctxp->next;
1524 remove_from_ctx_list(ctxp);
1525 add_to_used_ctxlist(ctxp);
1526 ctxp->ctx_mm->context = NO_CONTEXT;
1527 ctxp->ctx_mm = mm;
1528 mm->context = ctxp->ctx_number;
1529 sun4c_demap_context(&sun4c_context_ring[ctxp->ctx_number],
1530 ctxp->ctx_number);
1531}
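
The policy above is: take a free context if one exists, otherwise steal the
head of ctx_used, which is kept in LRU order, taking care never to steal
old_mm's context since that is the mm being switched away from. A simplified
model of just the victim selection, with invented types and list heads:

	struct mm_struct;	/* opaque here */
	struct ctx { struct ctx *next, *prev; struct mm_struct *mm; };

	static struct ctx *pick_victim(struct ctx *free_hd, struct ctx *used_hd,
				       struct mm_struct *old_mm)
	{
		struct ctx *c = free_hd->next;

		if (c != free_hd)
			return c;	/* a free context exists, take it */
		c = used_hd->next;	/* head of used list == least recently used */
		if (c->mm == old_mm)	/* never victimize the outgoing mm */
			c = c->next;
		return c;		/* caller invalidates c->mm->context */
	}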
1532
1533/* Switch the current MM context. */
1534static void sun4c_switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk, int cpu)
1535{
1536 struct ctx_list *ctx;
1537 int dirty = 0;
1538
1539 if (mm->context == NO_CONTEXT) {
1540 dirty = 1;
1541 sun4c_alloc_context(old_mm, mm);
1542 } else {
1543 /* Update the LRU ring of contexts. */
1544 ctx = ctx_list_pool + mm->context;
1545 remove_from_ctx_list(ctx);
1546 add_to_used_ctxlist(ctx);
1547 }
1548 if (dirty || old_mm != mm)
1549 sun4c_set_context(mm->context);
1550}
1551
1552static void sun4c_destroy_context(struct mm_struct *mm)
1553{
1554 struct ctx_list *ctx_old;
1555
1556 if (mm->context != NO_CONTEXT) {
1557 sun4c_demap_context(&sun4c_context_ring[mm->context], mm->context);
1558 ctx_old = ctx_list_pool + mm->context;
1559 remove_from_ctx_list(ctx_old);
1560 add_to_free_ctxlist(ctx_old);
1561 mm->context = NO_CONTEXT;
1562 }
1563}
1564
1565static void sun4c_mmu_info(struct seq_file *m)
1566{
1567 int used_user_entries, i;
1568
1569 used_user_entries = 0;
1570 for (i = 0; i < num_contexts; i++)
1571 used_user_entries += sun4c_context_ring[i].num_entries;
1572
1573 seq_printf(m,
1574 "vacsize\t\t: %d bytes\n"
1575 "vachwflush\t: %s\n"
1576 "vaclinesize\t: %d bytes\n"
1577 "mmuctxs\t\t: %d\n"
1578 "mmupsegs\t: %d\n"
1579 "kernelpsegs\t: %d\n"
1580 "kfreepsegs\t: %d\n"
1581 "usedpsegs\t: %d\n"
1582 "ufreepsegs\t: %d\n"
1583 "user_taken\t: %d\n"
1584 "max_taken\t: %d\n",
1585 sun4c_vacinfo.num_bytes,
1586 (sun4c_vacinfo.do_hwflushes ? "yes" : "no"),
1587 sun4c_vacinfo.linesize,
1588 num_contexts,
1589 (invalid_segment + 1),
1590 sun4c_kernel_ring.num_entries,
1591 sun4c_kfree_ring.num_entries,
1592 used_user_entries,
1593 sun4c_ufree_ring.num_entries,
1594 sun4c_user_taken_entries,
1595 max_user_taken_entries);
1596}
1597
1598/* Nothing below here should touch the MMU hardware or the mmu_entry
1599 * data structures.
1600 */
1601
1602/* First, the functions which the mid-level code uses to directly
1603 * manipulate the software page tables, plus some defines, since we
1604 * emulate the i386 page directory layout.
1605 */
1606#define PGD_PRESENT 0x001
1607#define PGD_RW 0x002
1608#define PGD_USER 0x004
1609#define PGD_ACCESSED 0x020
1610#define PGD_DIRTY 0x040
1611#define PGD_TABLE (PGD_PRESENT | PGD_RW | PGD_USER | PGD_ACCESSED | PGD_DIRTY)
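
Under this emulation a pmd value is just the page-aligned virtual address of
a pte page OR'd with i386-style status bits, which is why sun4c_pmd_set()
below stores PGD_TABLE | ptep and sun4c_pmd_page_v() recovers the address
with & PAGE_MASK. A small illustration, values invented:

	static void pmd_bits_demo(void)
	{
		unsigned long pte_page_va = 0xf0123000UL;	/* invented, page-aligned */
		unsigned long pmdv = PGD_TABLE | pte_page_va;

		/* the low bits carry status... */
		int present = (pmdv & PGD_PRESENT) != 0;	/* 1 */
		/* ...and PAGE_MASK strips them to recover the pte page */
		unsigned long back = pmdv & PAGE_MASK;		/* == pte_page_va */

		(void)present; (void)back;
	}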
1612
1613static void sun4c_set_pte(pte_t *ptep, pte_t pte)
1614{
1615 *ptep = pte;
1616}
1617
1618static void sun4c_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
1619{
1620}
1621
1622static void sun4c_pmd_set(pmd_t * pmdp, pte_t * ptep)
1623{
1624 pmdp->pmdv[0] = PGD_TABLE | (unsigned long) ptep;
1625}
1626
1627static void sun4c_pmd_populate(pmd_t * pmdp, struct page * ptep)
1628{
1629 if (page_address(ptep) == NULL) BUG(); /* No highmem on sun4c */
1630 pmdp->pmdv[0] = PGD_TABLE | (unsigned long) page_address(ptep);
1631}
1632
1633static int sun4c_pte_present(pte_t pte)
1634{
1635 return ((pte_val(pte) & (_SUN4C_PAGE_PRESENT | _SUN4C_PAGE_PRIV)) != 0);
1636}
1637static void sun4c_pte_clear(pte_t *ptep) { *ptep = __pte(0); }
1638
1639static int sun4c_pmd_bad(pmd_t pmd)
1640{
1641 return (((pmd_val(pmd) & ~PAGE_MASK) != PGD_TABLE) ||
1642 (!virt_addr_valid(pmd_val(pmd))));
1643}
1644
1645static int sun4c_pmd_present(pmd_t pmd)
1646{
1647 return ((pmd_val(pmd) & PGD_PRESENT) != 0);
1648}
1649
1650#if 0 /* if PMD takes one word */
1651static void sun4c_pmd_clear(pmd_t *pmdp) { *pmdp = __pmd(0); }
1652#else /* if pmd_t is a longish aggregate */
1653static void sun4c_pmd_clear(pmd_t *pmdp) {
1654 memset((void *)pmdp, 0, sizeof(pmd_t));
1655}
1656#endif
1657
1658static int sun4c_pgd_none(pgd_t pgd) { return 0; }
1659static int sun4c_pgd_bad(pgd_t pgd) { return 0; }
1660static int sun4c_pgd_present(pgd_t pgd) { return 1; }
1661static void sun4c_pgd_clear(pgd_t * pgdp) { }
1662
1663/*
1664 * The following only work if pte_present() is true.
1665 * Undefined behaviour if not.
1666 */
1667static pte_t sun4c_pte_mkwrite(pte_t pte)
1668{
1669 pte = __pte(pte_val(pte) | _SUN4C_PAGE_WRITE);
1670 if (pte_val(pte) & _SUN4C_PAGE_MODIFIED)
1671 pte = __pte(pte_val(pte) | _SUN4C_PAGE_SILENT_WRITE);
1672 return pte;
1673}
1674
1675static pte_t sun4c_pte_mkdirty(pte_t pte)
1676{
1677 pte = __pte(pte_val(pte) | _SUN4C_PAGE_MODIFIED);
1678 if (pte_val(pte) & _SUN4C_PAGE_WRITE)
1679 pte = __pte(pte_val(pte) | _SUN4C_PAGE_SILENT_WRITE);
1680 return pte;
1681}
1682
1683static pte_t sun4c_pte_mkyoung(pte_t pte)
1684{
1685 pte = __pte(pte_val(pte) | _SUN4C_PAGE_ACCESSED);
1686 if (pte_val(pte) & _SUN4C_PAGE_READ)
1687 pte = __pte(pte_val(pte) | _SUN4C_PAGE_SILENT_READ);
1688 return pte;
1689}
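
Note the pairing in these three helpers: the software bit is joined by its
hardware "silent" twin only when the matching permission bit is already set,
so the MMU never grants an access that the software pte state would not
allow. For instance (illustration only):

	static void silent_bits_demo(void)
	{
		pte_t a = __pte(_SUN4C_PAGE_WRITE);
		pte_t b = __pte(0);

		a = sun4c_pte_mkdirty(a);	/* gains MODIFIED and SILENT_WRITE */
		b = sun4c_pte_mkdirty(b);	/* gains MODIFIED only */
		(void)a; (void)b;
	}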
1690
1691/*
1692 * Conversion functions: convert a page and protection to a page entry,
1693 * and a page entry and page directory to the page they refer to.
1694 */
1695static pte_t sun4c_mk_pte(struct page *page, pgprot_t pgprot)
1696{
1697 return __pte(page_to_pfn(page) | pgprot_val(pgprot));
1698}
1699
1700static pte_t sun4c_mk_pte_phys(unsigned long phys_page, pgprot_t pgprot)
1701{
1702 return __pte((phys_page >> PAGE_SHIFT) | pgprot_val(pgprot));
1703}
1704
1705static pte_t sun4c_mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
1706{
1707 return __pte(((page - PAGE_OFFSET) >> PAGE_SHIFT) | pgprot_val(pgprot));
1708}
1709
1710static unsigned long sun4c_pte_pfn(pte_t pte)
1711{
1712 return pte_val(pte) & SUN4C_PFN_MASK;
1713}
1714
1715static pte_t sun4c_pgoff_to_pte(unsigned long pgoff)
1716{
1717 return __pte(pgoff | _SUN4C_PAGE_FILE);
1718}
1719
1720static unsigned long sun4c_pte_to_pgoff(pte_t pte)
1721{
1722 return pte_val(pte) & ((1UL << PTE_FILE_MAX_BITS) - 1);
1723}
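
These two must round-trip for file ptes: sun4c_pgoff_to_pte() tags the offset
with _SUN4C_PAGE_FILE, and sun4c_pte_to_pgoff() masks back down to the low
PTE_FILE_MAX_BITS bits, which is lossless provided the FILE bit lies above
those bits, as the masks imply. A sketch of the invariant:

	static int pgoff_roundtrip_ok(unsigned long pgoff)
	{
		/* holds for any offset that fits in PTE_FILE_MAX_BITS */
		pgoff &= (1UL << PTE_FILE_MAX_BITS) - 1;
		return sun4c_pte_to_pgoff(sun4c_pgoff_to_pte(pgoff)) == pgoff;
	}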
1724
1725
1726static inline unsigned long sun4c_pmd_page_v(pmd_t pmd)
1727{
1728 return (pmd_val(pmd) & PAGE_MASK);
1729}
1730
1731static struct page *sun4c_pmd_page(pmd_t pmd)
1732{
1733 return virt_to_page(sun4c_pmd_page_v(pmd));
1734}
1735
1736static unsigned long sun4c_pgd_page(pgd_t pgd) { return 0; }
1737
1738/* to find an entry in a page-table-directory */
1739static inline pgd_t *sun4c_pgd_offset(struct mm_struct * mm, unsigned long address)
1740{
1741 return mm->pgd + (address >> SUN4C_PGDIR_SHIFT);
1742}
1743
1744/* Find an entry in the second-level page table. */
1745static pmd_t *sun4c_pmd_offset(pgd_t * dir, unsigned long address)
1746{
1747 return (pmd_t *) dir;
1748}
1749
1750/* Find an entry in the third-level page table. */
1751pte_t *sun4c_pte_offset_kernel(pmd_t * dir, unsigned long address)
1752{
1753 return (pte_t *) sun4c_pmd_page_v(*dir) +
1754 ((address >> PAGE_SHIFT) & (SUN4C_PTRS_PER_PTE - 1));
1755}
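
Because the hardware walks only two levels, the pmd is folded into the pgd:
sun4c_pmd_offset() merely reinterprets the pgd slot, and only the pgd and pte
lookups do real indexing. A sketch of how the generic three-level walk
degenerates here:

	/* Illustration only: resolve addr to its pte through the emulated API. */
	static pte_t *walk_example(struct mm_struct *mm, unsigned long addr)
	{
		pgd_t *pgd = sun4c_pgd_offset(mm, addr);	/* real top-level index */
		pmd_t *pmd = sun4c_pmd_offset(pgd, addr);	/* identity: pmd == pgd slot */

		return sun4c_pte_offset_kernel(pmd, addr);	/* index into the pte page */
	}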
1756
1757static unsigned long sun4c_swp_type(swp_entry_t entry)
1758{
1759 return (entry.val & SUN4C_SWP_TYPE_MASK);
1760}
1761
1762static unsigned long sun4c_swp_offset(swp_entry_t entry)
1763{
1764 return (entry.val >> SUN4C_SWP_OFF_SHIFT) & SUN4C_SWP_OFF_MASK;
1765}
1766
1767static swp_entry_t sun4c_swp_entry(unsigned long type, unsigned long offset)
1768{
1769 return (swp_entry_t) {
1770 (offset & SUN4C_SWP_OFF_MASK) << SUN4C_SWP_OFF_SHIFT
1771 | (type & SUN4C_SWP_TYPE_MASK) };
1772}
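
The layout keeps the swap type in the low bits and the offset shifted above
it, so encoding then decoding is lossless for in-range fields, assuming (as
the shifts imply) that the two fields do not overlap. An illustrative check:

	static int swp_roundtrip_ok(unsigned long type, unsigned long off)
	{
		swp_entry_t e;

		type &= SUN4C_SWP_TYPE_MASK;	/* keep both fields in range */
		off  &= SUN4C_SWP_OFF_MASK;
		e = sun4c_swp_entry(type, off);
		return sun4c_swp_type(e) == type && sun4c_swp_offset(e) == off;
	}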
1773
1774static void sun4c_free_pte_slow(pte_t *pte)
1775{
1776 free_page((unsigned long)pte);
1777}
1778
1779static void sun4c_free_pgd_slow(pgd_t *pgd)
1780{
1781 free_page((unsigned long)pgd);
1782}
1783
1784static pgd_t *sun4c_get_pgd_fast(void)
1785{
1786 unsigned long *ret;
1787
1788 if ((ret = pgd_quicklist) != NULL) {
1789 pgd_quicklist = (unsigned long *)(*ret);
1790 ret[0] = ret[1];
1791 pgtable_cache_size--;
1792 } else {
1793 pgd_t *init;
1794
1795 ret = (unsigned long *)__get_free_page(GFP_KERNEL);
1796 memset (ret, 0, (KERNBASE / SUN4C_PGDIR_SIZE) * sizeof(pgd_t));
1797 init = sun4c_pgd_offset(&init_mm, 0);
1798 memcpy (((pgd_t *)ret) + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
1799 (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
1800 }
1801 return (pgd_t *)ret;
1802}
1803
1804static void sun4c_free_pgd_fast(pgd_t *pgd)
1805{
1806 *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
1807 pgd_quicklist = (unsigned long *) pgd;
1808 pgtable_cache_size++;
1809}
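
The quicklists are plain LIFO freelists threaded through the first word of
each free page: freeing pushes by storing the old list head into the page,
allocating pops by reading it back (the ret[0] = ret[1] above then evidently
repairs the table slot that the link pointer clobbered). A generic model of
the pattern, names invented:

	static unsigned long *quicklist;	/* first word of each page links to the next */
	static int cache_size;

	static void ql_free(unsigned long *page)
	{
		*page = (unsigned long)quicklist;	/* push: link via first word */
		quicklist = page;
		cache_size++;
	}

	static unsigned long *ql_alloc(void)
	{
		unsigned long *page = quicklist;

		if (page) {
			quicklist = (unsigned long *)*page;	/* pop */
			cache_size--;
		}
		return page;	/* NULL means fall back to the page allocator */
	}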
1810
1811
1812static inline pte_t *
1813sun4c_pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
1814{
1815 unsigned long *ret;
1816
1817 if ((ret = (unsigned long *)pte_quicklist) != NULL) {
1818 pte_quicklist = (unsigned long *)(*ret);
1819 ret[0] = ret[1];
1820 pgtable_cache_size--;
1821 }
1822 return (pte_t *)ret;
1823}
1824
1825static pte_t *sun4c_pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
1826{
1827 pte_t *pte;
1828
1829 if ((pte = sun4c_pte_alloc_one_fast(mm, address)) != NULL)
1830 return pte;
1831
1832 pte = (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
1833 return pte;
1834}
1835
1836static pgtable_t sun4c_pte_alloc_one(struct mm_struct *mm, unsigned long address)
1837{
1838 pte_t *pte;
1839 struct page *page;
1840
1841 pte = sun4c_pte_alloc_one_kernel(mm, address);
1842 if (pte == NULL)
1843 return NULL;
1844 page = virt_to_page(pte);
1845 pgtable_page_ctor(page);
1846 return page;
1847}
1848
1849static inline void sun4c_free_pte_fast(pte_t *pte)
1850{
1851 *(unsigned long *)pte = (unsigned long) pte_quicklist;
1852 pte_quicklist = (unsigned long *) pte;
1853 pgtable_cache_size++;
1854}
1855
1856static void sun4c_pte_free(pgtable_t pte)
1857{
1858 pgtable_page_dtor(pte);
1859 sun4c_free_pte_fast(page_address(pte));
1860}
1861
1862/*
1863 * Allocating and freeing a pmd is trivial: the 1-entry pmd lives
1864 * inside the pgd, so it has no extra memory associated with it.
1865 */
1866static pmd_t *sun4c_pmd_alloc_one(struct mm_struct *mm, unsigned long address)
1867{
1868 BUG();
1869 return NULL;
1870}
1871
1872static void sun4c_free_pmd_fast(pmd_t * pmd) { }
1873
1874static void sun4c_check_pgt_cache(int low, int high)
1875{
1876 if (pgtable_cache_size > high) {
1877 do {
1878 if (pgd_quicklist)
1879 sun4c_free_pgd_slow(sun4c_get_pgd_fast());
1880 if (pte_quicklist)
1881 sun4c_free_pte_slow(sun4c_pte_alloc_one_fast(NULL, 0));
1882 } while (pgtable_cache_size > low);
1883 }
1884}
1885
1886/* An experiment, on by default for now... -DaveM */
1887#define SUN4C_PRELOAD_PSEG
1888
1889void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
1890{
1891 unsigned long flags;
1892 int pseg;
1893
1894 if (vma->vm_mm->context == NO_CONTEXT)
1895 return;
1896
1897 local_irq_save(flags);
1898 address &= PAGE_MASK;
1899 if ((pseg = sun4c_get_segmap(address)) == invalid_segment) {
1900 struct sun4c_mmu_entry *entry = sun4c_user_strategy();
1901 struct mm_struct *mm = vma->vm_mm;
1902 unsigned long start, end;
1903
1904 entry->vaddr = start = (address & SUN4C_REAL_PGDIR_MASK);
1905 entry->ctx = mm->context;
1906 add_ring_ordered(sun4c_context_ring + mm->context, entry);
1907 sun4c_put_segmap(entry->vaddr, entry->pseg);
1908 end = start + SUN4C_REAL_PGDIR_SIZE;
1909 while (start < end) {
1910#ifdef SUN4C_PRELOAD_PSEG
1911 pgd_t *pgdp = sun4c_pgd_offset(mm, start);
1912 pte_t *ptep;
1913
1914 if (!pgdp)
1915 goto no_mapping;
1916 ptep = sun4c_pte_offset_kernel((pmd_t *) pgdp, start);
1917 if (!ptep || !(pte_val(*ptep) & _SUN4C_PAGE_PRESENT))
1918 goto no_mapping;
1919 sun4c_put_pte(start, pte_val(*ptep));
1920 goto next;
1921
1922 no_mapping:
1923#endif
1924 sun4c_put_pte(start, 0);
1925#ifdef SUN4C_PRELOAD_PSEG
1926 next:
1927#endif
1928 start += PAGE_SIZE;
1929 }
1930#ifndef SUN4C_PRELOAD_PSEG
1931 sun4c_put_pte(address, pte_val(*ptep));
1932#endif
1933 local_irq_restore(flags);
1934 return;
1935 } else {
1936 struct sun4c_mmu_entry *entry = &mmu_entry_pool[pseg];
1937
1938 remove_lru(entry);
1939 add_lru(entry);
1940 }
1941
1942 sun4c_put_pte(address, pte_val(*ptep));
1943 local_irq_restore(flags);
1944}
1945
1946extern void sparc_context_init(int);
1947extern unsigned long bootmem_init(unsigned long *pages_avail);
1948extern unsigned long last_valid_pfn;
1949
1950void __init sun4c_paging_init(void)
1951{
1952 int i, cnt;
1953 unsigned long kernel_end, vaddr;
1954 extern struct resource sparc_iomap;
1955 unsigned long end_pfn, pages_avail;
1956
1957 kernel_end = (unsigned long) &_end;
1958 kernel_end = SUN4C_REAL_PGDIR_ALIGN(kernel_end);
1959
1960 pages_avail = 0;
1961 last_valid_pfn = bootmem_init(&pages_avail);
1962 end_pfn = last_valid_pfn;
1963
1964 sun4c_probe_mmu();
1965 invalid_segment = (num_segmaps - 1);
1966 sun4c_init_mmu_entry_pool();
1967 sun4c_init_rings();
1968 sun4c_init_map_kernelprom(kernel_end);
1969 sun4c_init_clean_mmu(kernel_end);
1970 sun4c_init_fill_kernel_ring(SUN4C_KERNEL_BUCKETS);
1971 sun4c_init_lock_area(sparc_iomap.start, IOBASE_END);
1972 sun4c_init_lock_area(DVMA_VADDR, DVMA_END);
1973 sun4c_init_lock_areas();
1974 sun4c_init_fill_user_ring();
1975
1976 sun4c_set_context(0);
1977 memset(swapper_pg_dir, 0, PAGE_SIZE);
1978 memset(pg0, 0, PAGE_SIZE);
1979 memset(pg1, 0, PAGE_SIZE);
1980 memset(pg2, 0, PAGE_SIZE);
1981 memset(pg3, 0, PAGE_SIZE);
1982
1983	/* Point the first four vmalloc pgd slots at pg0-pg3 now to save work later. */
1984 vaddr = VMALLOC_START;
1985 swapper_pg_dir[vaddr>>SUN4C_PGDIR_SHIFT] = __pgd(PGD_TABLE | (unsigned long) pg0);
1986 vaddr += SUN4C_PGDIR_SIZE;
1987 swapper_pg_dir[vaddr>>SUN4C_PGDIR_SHIFT] = __pgd(PGD_TABLE | (unsigned long) pg1);
1988 vaddr += SUN4C_PGDIR_SIZE;
1989 swapper_pg_dir[vaddr>>SUN4C_PGDIR_SHIFT] = __pgd(PGD_TABLE | (unsigned long) pg2);
1990 vaddr += SUN4C_PGDIR_SIZE;
1991 swapper_pg_dir[vaddr>>SUN4C_PGDIR_SHIFT] = __pgd(PGD_TABLE | (unsigned long) pg3);
1992 sun4c_init_ss2_cache_bug();
1993 sparc_context_init(num_contexts);
1994
1995 {
1996 unsigned long zones_size[MAX_NR_ZONES];
1997 unsigned long zholes_size[MAX_NR_ZONES];
1998 unsigned long npages;
1999 int znum;
2000
2001 for (znum = 0; znum < MAX_NR_ZONES; znum++)
2002 zones_size[znum] = zholes_size[znum] = 0;
2003
2004 npages = max_low_pfn - pfn_base;
2005
2006 zones_size[ZONE_DMA] = npages;
2007 zholes_size[ZONE_DMA] = npages - pages_avail;
2008
2009 npages = highend_pfn - max_low_pfn;
2010 zones_size[ZONE_HIGHMEM] = npages;
2011 zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();
2012
2013 free_area_init_node(0, zones_size, pfn_base, zholes_size);
2014 }
2015
2016 cnt = 0;
2017 for (i = 0; i < num_segmaps; i++)
2018 if (mmu_entry_pool[i].locked)
2019 cnt++;
2020
2021 max_user_taken_entries = num_segmaps - cnt - 40 - 1;
2022
2023 printk("SUN4C: %d mmu entries for the kernel\n", cnt);
2024}
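
The zone sizing at the end is plain pfn arithmetic: ZONE_DMA spans every low
pfn, and its holes are whatever bootmem did not report back as available. A
worked example with invented figures:

	static void zone_sizing_demo(void)
	{
		/* Invented: 32 MB of low memory in 4 KB pages, 292 pages of
		 * it already taken by the kernel image and bootmem data.
		 */
		unsigned long base_pfn = 0, low_pfn = 8192, avail = 7900;
		unsigned long npages = low_pfn - base_pfn;	/* 8192 pages */
		unsigned long holes = npages - avail;		/* 292 hole pages */

		(void)holes;
	}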
2025
2026static pgprot_t sun4c_pgprot_noncached(pgprot_t prot)
2027{
2028 prot |= __pgprot(_SUN4C_PAGE_IO | _SUN4C_PAGE_NOCACHE);
2029
2030 return prot;
2031}
2032
2033/* Load up routines and constants for sun4c mmu */
2034void __init ld_mmu_sun4c(void)
2035{
2036 extern void ___xchg32_sun4c(void);
2037
2038 printk("Loading sun4c MMU routines\n");
2039
2040 /* First the constants */
2041 BTFIXUPSET_SIMM13(pgdir_shift, SUN4C_PGDIR_SHIFT);
2042 BTFIXUPSET_SETHI(pgdir_size, SUN4C_PGDIR_SIZE);
2043 BTFIXUPSET_SETHI(pgdir_mask, SUN4C_PGDIR_MASK);
2044
2045 BTFIXUPSET_SIMM13(ptrs_per_pmd, SUN4C_PTRS_PER_PMD);
2046 BTFIXUPSET_SIMM13(ptrs_per_pgd, SUN4C_PTRS_PER_PGD);
2047 BTFIXUPSET_SIMM13(user_ptrs_per_pgd, KERNBASE / SUN4C_PGDIR_SIZE);
2048
2049 BTFIXUPSET_INT(page_none, pgprot_val(SUN4C_PAGE_NONE));
2050 PAGE_SHARED = pgprot_val(SUN4C_PAGE_SHARED);
2051 BTFIXUPSET_INT(page_copy, pgprot_val(SUN4C_PAGE_COPY));
2052 BTFIXUPSET_INT(page_readonly, pgprot_val(SUN4C_PAGE_READONLY));
2053 BTFIXUPSET_INT(page_kernel, pgprot_val(SUN4C_PAGE_KERNEL));
2054 page_kernel = pgprot_val(SUN4C_PAGE_KERNEL);
2055
2056 /* Functions */
2057 BTFIXUPSET_CALL(pgprot_noncached, sun4c_pgprot_noncached, BTFIXUPCALL_NORM);
2058 BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4c, BTFIXUPCALL_NORM);
2059 BTFIXUPSET_CALL(do_check_pgt_cache, sun4c_check_pgt_cache, BTFIXUPCALL_NORM);
2060
2061 BTFIXUPSET_CALL(flush_cache_all, sun4c_flush_cache_all, BTFIXUPCALL_NORM);
2062
2063 if (sun4c_vacinfo.do_hwflushes) {
2064 BTFIXUPSET_CALL(sun4c_flush_page, sun4c_flush_page_hw, BTFIXUPCALL_NORM);
2065 BTFIXUPSET_CALL(sun4c_flush_segment, sun4c_flush_segment_hw, BTFIXUPCALL_NORM);
2066 BTFIXUPSET_CALL(sun4c_flush_context, sun4c_flush_context_hw, BTFIXUPCALL_NORM);
2067 } else {
2068 BTFIXUPSET_CALL(sun4c_flush_page, sun4c_flush_page_sw, BTFIXUPCALL_NORM);
2069 BTFIXUPSET_CALL(sun4c_flush_segment, sun4c_flush_segment_sw, BTFIXUPCALL_NORM);
2070 BTFIXUPSET_CALL(sun4c_flush_context, sun4c_flush_context_sw, BTFIXUPCALL_NORM);
2071 }
2072
2073 BTFIXUPSET_CALL(flush_tlb_mm, sun4c_flush_tlb_mm, BTFIXUPCALL_NORM);
2074 BTFIXUPSET_CALL(flush_cache_mm, sun4c_flush_cache_mm, BTFIXUPCALL_NORM);
2075 BTFIXUPSET_CALL(destroy_context, sun4c_destroy_context, BTFIXUPCALL_NORM);
2076 BTFIXUPSET_CALL(switch_mm, sun4c_switch_mm, BTFIXUPCALL_NORM);
2077 BTFIXUPSET_CALL(flush_cache_page, sun4c_flush_cache_page, BTFIXUPCALL_NORM);
2078 BTFIXUPSET_CALL(flush_tlb_page, sun4c_flush_tlb_page, BTFIXUPCALL_NORM);
2079 BTFIXUPSET_CALL(flush_tlb_range, sun4c_flush_tlb_range, BTFIXUPCALL_NORM);
2080 BTFIXUPSET_CALL(flush_cache_range, sun4c_flush_cache_range, BTFIXUPCALL_NORM);
2081 BTFIXUPSET_CALL(__flush_page_to_ram, sun4c_flush_page_to_ram, BTFIXUPCALL_NORM);
2082 BTFIXUPSET_CALL(flush_tlb_all, sun4c_flush_tlb_all, BTFIXUPCALL_NORM);
2083
2084 BTFIXUPSET_CALL(flush_sig_insns, sun4c_flush_sig_insns, BTFIXUPCALL_NOP);
2085
2086 BTFIXUPSET_CALL(set_pte, sun4c_set_pte, BTFIXUPCALL_STO1O0);
2087
2088 BTFIXUPSET_CALL(pte_pfn, sun4c_pte_pfn, BTFIXUPCALL_NORM);
2089#if 0 /* PAGE_SHIFT <= 12 */ /* Eek. Investigate. XXX */
2090 BTFIXUPSET_CALL(pmd_page, sun4c_pmd_page, BTFIXUPCALL_ANDNINT(PAGE_SIZE - 1));
2091#else
2092 BTFIXUPSET_CALL(pmd_page, sun4c_pmd_page, BTFIXUPCALL_NORM);
2093#endif
2094 BTFIXUPSET_CALL(pmd_set, sun4c_pmd_set, BTFIXUPCALL_NORM);
2095 BTFIXUPSET_CALL(pmd_populate, sun4c_pmd_populate, BTFIXUPCALL_NORM);
2096
2097 BTFIXUPSET_CALL(pte_present, sun4c_pte_present, BTFIXUPCALL_NORM);
2098 BTFIXUPSET_CALL(pte_clear, sun4c_pte_clear, BTFIXUPCALL_STG0O0);
2099
2100 BTFIXUPSET_CALL(pmd_bad, sun4c_pmd_bad, BTFIXUPCALL_NORM);
2101 BTFIXUPSET_CALL(pmd_present, sun4c_pmd_present, BTFIXUPCALL_NORM);
2102 BTFIXUPSET_CALL(pmd_clear, sun4c_pmd_clear, BTFIXUPCALL_STG0O0);
2103
2104 BTFIXUPSET_CALL(pgd_none, sun4c_pgd_none, BTFIXUPCALL_RETINT(0));
2105 BTFIXUPSET_CALL(pgd_bad, sun4c_pgd_bad, BTFIXUPCALL_RETINT(0));
2106 BTFIXUPSET_CALL(pgd_present, sun4c_pgd_present, BTFIXUPCALL_RETINT(1));
2107 BTFIXUPSET_CALL(pgd_clear, sun4c_pgd_clear, BTFIXUPCALL_NOP);
2108
2109 BTFIXUPSET_CALL(mk_pte, sun4c_mk_pte, BTFIXUPCALL_NORM);
2110 BTFIXUPSET_CALL(mk_pte_phys, sun4c_mk_pte_phys, BTFIXUPCALL_NORM);
2111 BTFIXUPSET_CALL(mk_pte_io, sun4c_mk_pte_io, BTFIXUPCALL_NORM);
2112
2113 BTFIXUPSET_INT(pte_modify_mask, _SUN4C_PAGE_CHG_MASK);
2114 BTFIXUPSET_CALL(pmd_offset, sun4c_pmd_offset, BTFIXUPCALL_NORM);
2115 BTFIXUPSET_CALL(pte_offset_kernel, sun4c_pte_offset_kernel, BTFIXUPCALL_NORM);
2116 BTFIXUPSET_CALL(free_pte_fast, sun4c_free_pte_fast, BTFIXUPCALL_NORM);
2117 BTFIXUPSET_CALL(pte_free, sun4c_pte_free, BTFIXUPCALL_NORM);
2118 BTFIXUPSET_CALL(pte_alloc_one_kernel, sun4c_pte_alloc_one_kernel, BTFIXUPCALL_NORM);
2119 BTFIXUPSET_CALL(pte_alloc_one, sun4c_pte_alloc_one, BTFIXUPCALL_NORM);
2120 BTFIXUPSET_CALL(free_pmd_fast, sun4c_free_pmd_fast, BTFIXUPCALL_NOP);
2121 BTFIXUPSET_CALL(pmd_alloc_one, sun4c_pmd_alloc_one, BTFIXUPCALL_RETO0);
2122 BTFIXUPSET_CALL(free_pgd_fast, sun4c_free_pgd_fast, BTFIXUPCALL_NORM);
2123 BTFIXUPSET_CALL(get_pgd_fast, sun4c_get_pgd_fast, BTFIXUPCALL_NORM);
2124
2125 BTFIXUPSET_HALF(pte_writei, _SUN4C_PAGE_WRITE);
2126 BTFIXUPSET_HALF(pte_dirtyi, _SUN4C_PAGE_MODIFIED);
2127 BTFIXUPSET_HALF(pte_youngi, _SUN4C_PAGE_ACCESSED);
2128 BTFIXUPSET_HALF(pte_filei, _SUN4C_PAGE_FILE);
2129 BTFIXUPSET_HALF(pte_wrprotecti, _SUN4C_PAGE_WRITE|_SUN4C_PAGE_SILENT_WRITE);
2130 BTFIXUPSET_HALF(pte_mkcleani, _SUN4C_PAGE_MODIFIED|_SUN4C_PAGE_SILENT_WRITE);
2131 BTFIXUPSET_HALF(pte_mkoldi, _SUN4C_PAGE_ACCESSED|_SUN4C_PAGE_SILENT_READ);
2132 BTFIXUPSET_CALL(pte_mkwrite, sun4c_pte_mkwrite, BTFIXUPCALL_NORM);
2133 BTFIXUPSET_CALL(pte_mkdirty, sun4c_pte_mkdirty, BTFIXUPCALL_NORM);
2134 BTFIXUPSET_CALL(pte_mkyoung, sun4c_pte_mkyoung, BTFIXUPCALL_NORM);
2135 BTFIXUPSET_CALL(update_mmu_cache, sun4c_update_mmu_cache, BTFIXUPCALL_NORM);
2136
2137 BTFIXUPSET_CALL(pte_to_pgoff, sun4c_pte_to_pgoff, BTFIXUPCALL_NORM);
2138 BTFIXUPSET_CALL(pgoff_to_pte, sun4c_pgoff_to_pte, BTFIXUPCALL_NORM);
2139
2140 BTFIXUPSET_CALL(mmu_lockarea, sun4c_lockarea, BTFIXUPCALL_NORM);
2141 BTFIXUPSET_CALL(mmu_unlockarea, sun4c_unlockarea, BTFIXUPCALL_NORM);
2142
2143 BTFIXUPSET_CALL(mmu_get_scsi_one, sun4c_get_scsi_one, BTFIXUPCALL_NORM);
2144 BTFIXUPSET_CALL(mmu_get_scsi_sgl, sun4c_get_scsi_sgl, BTFIXUPCALL_NORM);
2145 BTFIXUPSET_CALL(mmu_release_scsi_one, sun4c_release_scsi_one, BTFIXUPCALL_NORM);
2146 BTFIXUPSET_CALL(mmu_release_scsi_sgl, sun4c_release_scsi_sgl, BTFIXUPCALL_NORM);
2147
2148 BTFIXUPSET_CALL(mmu_map_dma_area, sun4c_map_dma_area, BTFIXUPCALL_NORM);
2149 BTFIXUPSET_CALL(mmu_unmap_dma_area, sun4c_unmap_dma_area, BTFIXUPCALL_NORM);
2150
2151 BTFIXUPSET_CALL(sparc_mapiorange, sun4c_mapiorange, BTFIXUPCALL_NORM);
2152 BTFIXUPSET_CALL(sparc_unmapiorange, sun4c_unmapiorange, BTFIXUPCALL_NORM);
2153
2154 BTFIXUPSET_CALL(__swp_type, sun4c_swp_type, BTFIXUPCALL_NORM);
2155 BTFIXUPSET_CALL(__swp_offset, sun4c_swp_offset, BTFIXUPCALL_NORM);
2156 BTFIXUPSET_CALL(__swp_entry, sun4c_swp_entry, BTFIXUPCALL_NORM);
2157
2158 BTFIXUPSET_CALL(alloc_thread_info_node, sun4c_alloc_thread_info_node, BTFIXUPCALL_NORM);
2159 BTFIXUPSET_CALL(free_thread_info, sun4c_free_thread_info, BTFIXUPCALL_NORM);
2160
2161 BTFIXUPSET_CALL(mmu_info, sun4c_mmu_info, BTFIXUPCALL_NORM);
2162
2163 /* These should _never_ get called with two level tables. */
2164 BTFIXUPSET_CALL(pgd_set, sun4c_pgd_set, BTFIXUPCALL_NOP);
2165 BTFIXUPSET_CALL(pgd_page_vaddr, sun4c_pgd_page, BTFIXUPCALL_RETO0);
2166}
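
Conceptually, each BTFIXUPSET_CALL() above fills in one slot of a boot-time
dispatch table: ld_mmu_sun4c() decides which implementation every generic MMU
hook resolves to, and btfixup then rewrites the call sites in place instead
of leaving an indirect call. A function-pointer sketch of the same idea, with
an invented ops struct:

	/* Invented illustration of the dispatch that btfixup implements. */
	struct mmu_ops {
		void (*flush_tlb_all)(void);
		void (*flush_tlb_mm)(struct mm_struct *mm);
		void (*switch_mm)(struct mm_struct *old_mm, struct mm_struct *mm,
				  struct task_struct *tsk, int cpu);
	};

	static struct mmu_ops mmu;	/* one global table, filled in at boot */

	static void ld_mmu_sun4c_like(void)
	{
		mmu.flush_tlb_all = sun4c_flush_tlb_all;
		mmu.flush_tlb_mm  = sun4c_flush_tlb_mm;
		mmu.switch_mm     = sun4c_switch_mm;
		/* ...one assignment per BTFIXUPSET_CALL() above... */
	}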
diff --git a/arch/sparc/mm/viking.S b/arch/sparc/mm/viking.S
index 6dfcc13d3100..bf8ee0613ae7 100644
--- a/arch/sparc/mm/viking.S
+++ b/arch/sparc/mm/viking.S
@@ -14,7 +14,6 @@
 #include <asm/page.h>
 #include <asm/pgtsrmmu.h>
 #include <asm/viking.h>
-#include <asm/btfixup.h>
 
 #ifdef CONFIG_SMP
 	.data