author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/sparc/mm

Linux-2.6.12-rc2 (tag: v2.6.12-rc2)

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/sparc/mm')
-rw-r--r--  arch/sparc/mm/Makefile        23
-rw-r--r--  arch/sparc/mm/btfixup.c      336
-rw-r--r--  arch/sparc/mm/extable.c       77
-rw-r--r--  arch/sparc/mm/fault.c        596
-rw-r--r--  arch/sparc/mm/generic.c      154
-rw-r--r--  arch/sparc/mm/highmem.c      120
-rw-r--r--  arch/sparc/mm/hypersparc.S   413
-rw-r--r--  arch/sparc/mm/init.c         515
-rw-r--r--  arch/sparc/mm/io-unit.c      318
-rw-r--r--  arch/sparc/mm/iommu.c        475
-rw-r--r--  arch/sparc/mm/loadmmu.c       46
-rw-r--r--  arch/sparc/mm/nosrmmu.c       59
-rw-r--r--  arch/sparc/mm/nosun4c.c       77
-rw-r--r--  arch/sparc/mm/srmmu.c       2274
-rw-r--r--  arch/sparc/mm/sun4c.c       2276
-rw-r--r--  arch/sparc/mm/swift.S        256
-rw-r--r--  arch/sparc/mm/tsunami.S      133
-rw-r--r--  arch/sparc/mm/viking.S       284
18 files changed, 8432 insertions(+), 0 deletions(-)
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
new file mode 100644
index 000000000000..16eeba4b991a
--- /dev/null
+++ b/arch/sparc/mm/Makefile
@@ -0,0 +1,23 @@
# $Id: Makefile,v 1.38 2000/12/15 00:41:22 davem Exp $
# Makefile for the linux Sparc-specific parts of the memory manager.
#

EXTRA_AFLAGS := -ansi

obj-y := fault.o init.o loadmmu.o generic.o extable.o btfixup.o

ifeq ($(CONFIG_SUN4),y)
obj-y += nosrmmu.o
else
obj-y += srmmu.o iommu.o io-unit.o hypersparc.o viking.o tsunami.o swift.o
endif

ifdef CONFIG_HIGHMEM
obj-y += highmem.o
endif

ifdef CONFIG_SMP
obj-y += nosun4c.o
else
obj-y += sun4c.o
endif
diff --git a/arch/sparc/mm/btfixup.c b/arch/sparc/mm/btfixup.c
new file mode 100644
index 000000000000..f147a44c9780
--- /dev/null
+++ b/arch/sparc/mm/btfixup.c
@@ -0,0 +1,336 @@
/* $Id: btfixup.c,v 1.10 2000/05/09 17:40:13 davem Exp $
 * btfixup.c: Boot time code fixup and relocator, so that
 * we can get rid of most indirect calls to achieve single
 * image sun4c and srmmu kernel.
 *
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/btfixup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/system.h>
#include <asm/cacheflush.h>

#define BTFIXUP_OPTIMIZE_NOP
#define BTFIXUP_OPTIMIZE_OTHER

extern char *srmmu_name;
static char version[] __initdata = "Boot time fixup v1.6. 4/Mar/98 Jakub Jelinek (jj@ultra.linux.cz). Patching kernel for ";
#ifdef CONFIG_SUN4
static char str_sun4c[] __initdata = "sun4\n";
#else
static char str_sun4c[] __initdata = "sun4c\n";
#endif
static char str_srmmu[] __initdata = "srmmu[%s]/";
static char str_iommu[] __initdata = "iommu\n";
static char str_iounit[] __initdata = "io-unit\n";

static int visited __initdata = 0;
extern unsigned int ___btfixup_start[], ___btfixup_end[], __init_begin[], __init_end[], __init_text_end[];
extern unsigned int _stext[], _end[], __start___ksymtab[], __stop___ksymtab[];
static char wrong_f[] __initdata = "Trying to set f fixup %p to invalid function %08x\n";
static char wrong_b[] __initdata = "Trying to set b fixup %p to invalid function %08x\n";
static char wrong_s[] __initdata = "Trying to set s fixup %p to invalid value %08x\n";
static char wrong_h[] __initdata = "Trying to set h fixup %p to invalid value %08x\n";
static char wrong_a[] __initdata = "Trying to set a fixup %p to invalid value %08x\n";
static char wrong[] __initdata = "Wrong address for %c fixup %p\n";
static char insn_f[] __initdata = "Fixup f %p refers to weird instructions at %p[%08x,%08x]\n";
static char insn_b[] __initdata = "Fixup b %p doesn't refer to a SETHI at %p[%08x]\n";
static char insn_s[] __initdata = "Fixup s %p doesn't refer to an OR at %p[%08x]\n";
static char insn_h[] __initdata = "Fixup h %p doesn't refer to a SETHI at %p[%08x]\n";
static char insn_a[] __initdata = "Fixup a %p doesn't refer to a SETHI nor OR at %p[%08x]\n";
static char insn_i[] __initdata = "Fixup i %p doesn't refer to a valid instruction at %p[%08x]\n";
static char fca_und[] __initdata = "flush_cache_all undefined in btfixup()\n";
static char wrong_setaddr[] __initdata = "Garbled CALL/INT patch at %p[%08x,%08x,%08x]=%08x\n";

#ifdef BTFIXUP_OPTIMIZE_OTHER
static void __init set_addr(unsigned int *addr, unsigned int q1, int fmangled, unsigned int value)
{
	if (!fmangled)
		*addr = value;
	else {
		unsigned int *q = (unsigned int *)q1;
		if (*addr == 0x01000000) {
			/* Noped */
			*q = value;
		} else if (addr[-1] == *q) {
			/* Moved */
			addr[-1] = value;
			*q = value;
		} else {
			prom_printf(wrong_setaddr, addr-1, addr[-1], *addr, *q, value);
			prom_halt();
		}
	}
}
#else
static __inline__ void set_addr(unsigned int *addr, unsigned int q1, int fmangled, unsigned int value)
{
	*addr = value;
}
#endif

void __init btfixup(void)
{
	unsigned int *p, *q;
	int type, count;
	unsigned insn;
	unsigned *addr;
	int fmangled = 0;
	void (*flush_cacheall)(void);

	if (!visited) {
		visited++;
		printk(version);
		if (ARCH_SUN4C_SUN4)
			printk(str_sun4c);
		else {
			printk(str_srmmu, srmmu_name);
			if (sparc_cpu_model == sun4d)
				printk(str_iounit);
			else
				printk(str_iommu);
		}
	}
	for (p = ___btfixup_start; p < ___btfixup_end; ) {
		count = p[2];
		q = p + 3;
		switch (type = *(unsigned char *)p) {
		case 'f':
			count = p[3];
			q = p + 4;
			if (((p[0] & 1) || p[1])
			    && ((p[1] & 3) || (unsigned *)(p[1]) < _stext || (unsigned *)(p[1]) >= _end)) {
				prom_printf(wrong_f, p, p[1]);
				prom_halt();
			}
			break;
		case 'b':
			if (p[1] < (unsigned long)__init_begin || p[1] >= (unsigned long)__init_text_end || (p[1] & 3)) {
				prom_printf(wrong_b, p, p[1]);
				prom_halt();
			}
			break;
		case 's':
			if (p[1] + 0x1000 >= 0x2000) {
				prom_printf(wrong_s, p, p[1]);
				prom_halt();
			}
			break;
		case 'h':
			if (p[1] & 0x3ff) {
				prom_printf(wrong_h, p, p[1]);
				prom_halt();
			}
			break;
		case 'a':
			if (p[1] + 0x1000 >= 0x2000 && (p[1] & 0x3ff)) {
				prom_printf(wrong_a, p, p[1]);
				prom_halt();
			}
			break;
		}
		if (p[0] & 1) {
			p[0] &= ~1;
			while (count) {
				fmangled = 0;
				addr = (unsigned *)*q;
				if (addr < _stext || addr >= _end) {
					prom_printf(wrong, type, p);
					prom_halt();
				}
				insn = *addr;
#ifdef BTFIXUP_OPTIMIZE_OTHER
				if (type != 'f' && q[1]) {
					insn = *(unsigned int *)q[1];
					if (!insn || insn == 1)
						insn = *addr;
					else
						fmangled = 1;
				}
#endif
				switch (type) {
				case 'f':	/* CALL */
					if (addr >= __start___ksymtab && addr < __stop___ksymtab) {
						*addr = p[1];
						break;
					} else if (!q[1]) {
						if ((insn & 0xc1c00000) == 0x01000000) { /* SETHI */
							*addr = (insn & 0xffc00000) | (p[1] >> 10); break;
						} else if ((insn & 0xc1f82000) == 0x80102000) { /* OR X, %LO(i), Y */
							*addr = (insn & 0xffffe000) | (p[1] & 0x3ff); break;
						} else if ((insn & 0xc0000000) != 0x40000000) { /* !CALL */
					bad_f:
							prom_printf(insn_f, p, addr, insn, addr[1]);
							prom_halt();
						}
					} else if (q[1] != 1)
						addr[1] = q[1];
					if (p[2] == BTFIXUPCALL_NORM) {
				norm_f:
						*addr = 0x40000000 | ((p[1] - (unsigned)addr) >> 2);
						q[1] = 0;
						break;
					}
#ifndef BTFIXUP_OPTIMIZE_NOP
					goto norm_f;
#else
					if (!(addr[1] & 0x80000000)) {
						if ((addr[1] & 0xc1c00000) != 0x01000000)	/* !SETHI */
							goto bad_f; /* CALL, Bicc, FBfcc, CBccc are weird in delay slot, aren't they? */
					} else {
						if ((addr[1] & 0x01800000) == 0x01800000) {
							if ((addr[1] & 0x01f80000) == 0x01e80000) {
								/* RESTORE */
								goto norm_f; /* It is dangerous to patch that */
							}
							goto bad_f;
						}
						if ((addr[1] & 0xffffe003) == 0x9e03e000) {
							/* ADD %O7, XX, %o7 */
							int displac = (addr[1] << 19);

							displac = (displac >> 21) + 2;
							*addr = (0x10800000) + (displac & 0x3fffff);
							q[1] = addr[1];
							addr[1] = p[2];
							break;
						}
						if ((addr[1] & 0x201f) == 0x200f || (addr[1] & 0x7c000) == 0x3c000)
							goto norm_f; /* Someone is playing bad tricks with us: rs1 or rs2 is o7 */
						if ((addr[1] & 0x3e000000) == 0x1e000000)
							goto norm_f; /* rd is %o7. We'd better take care. */
					}
					if (p[2] == BTFIXUPCALL_NOP) {
						*addr = 0x01000000;
						q[1] = 1;
						break;
					}
#ifndef BTFIXUP_OPTIMIZE_OTHER
					goto norm_f;
#else
					if (addr[1] == 0x01000000) {	/* NOP in the delay slot */
						q[1] = addr[1];
						*addr = p[2];
						break;
					}
					if ((addr[1] & 0xc0000000) != 0xc0000000) {
						/* Not a memory operation */
						if ((addr[1] & 0x30000000) == 0x10000000) {
							/* Ok, non-memory op with rd %oX */
							if ((addr[1] & 0x3e000000) == 0x1c000000)
								goto bad_f; /* Aiee. Someone is playing strange %sp tricks */
							if ((addr[1] & 0x3e000000) > 0x12000000 ||
							    ((addr[1] & 0x3e000000) == 0x12000000 &&
							     p[2] != BTFIXUPCALL_STO1O0 && p[2] != BTFIXUPCALL_SWAPO0O1) ||
							    ((p[2] & 0xffffe000) == BTFIXUPCALL_RETINT(0))) {
								/* Nobody uses the result. We can nop it out. */
								*addr = p[2];
								q[1] = addr[1];
								addr[1] = 0x01000000;
								break;
							}
							if ((addr[1] & 0xf1ffffe0) == 0x90100000) {
								/* MOV %reg, %Ox */
								if ((addr[1] & 0x3e000000) == 0x10000000 &&
								    (p[2] & 0x7c000) == 0x20000) {
									/* Ok, it is call xx; mov reg, %o0 and call optimizes
									   to doing something on %o0. Patch the patch. */
									*addr = (p[2] & ~0x7c000) | ((addr[1] & 0x1f) << 14);
									q[1] = addr[1];
									addr[1] = 0x01000000;
									break;
								}
								if ((addr[1] & 0x3e000000) == 0x12000000 &&
								    p[2] == BTFIXUPCALL_STO1O0) {
									*addr = (p[2] & ~0x3e000000) | ((addr[1] & 0x1f) << 25);
									q[1] = addr[1];
									addr[1] = 0x01000000;
									break;
								}
							}
						}
					}
					*addr = addr[1];
					q[1] = addr[1];
					addr[1] = p[2];
					break;
#endif /* BTFIXUP_OPTIMIZE_OTHER */
#endif /* BTFIXUP_OPTIMIZE_NOP */
				case 'b':	/* BLACKBOX */
					/* Has to be sethi i, xx */
					if ((insn & 0xc1c00000) != 0x01000000) {
						prom_printf(insn_b, p, addr, insn);
						prom_halt();
					} else {
						void (*do_fixup)(unsigned *);

						do_fixup = (void (*)(unsigned *))p[1];
						do_fixup(addr);
					}
					break;
				case 's':	/* SIMM13 */
					/* Has to be or %g0, i, xx */
					if ((insn & 0xc1ffe000) != 0x80102000) {
						prom_printf(insn_s, p, addr, insn);
						prom_halt();
					}
					set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x1fff));
					break;
				case 'h':	/* SETHI */
					/* Has to be sethi i, xx */
					if ((insn & 0xc1c00000) != 0x01000000) {
						prom_printf(insn_h, p, addr, insn);
						prom_halt();
					}
					set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
					break;
				case 'a':	/* HALF */
					/* Has to be sethi i, xx or or %g0, i, xx */
					if ((insn & 0xc1c00000) != 0x01000000 &&
					    (insn & 0xc1ffe000) != 0x80102000) {
						prom_printf(insn_a, p, addr, insn);
						prom_halt();
					}
					if (p[1] & 0x3ff)
						set_addr(addr, q[1], fmangled,
							 (insn & 0x3e000000) | 0x80102000 | (p[1] & 0x1fff));
					else
						set_addr(addr, q[1], fmangled,
							 (insn & 0x3e000000) | 0x01000000 | (p[1] >> 10));
					break;
				case 'i':	/* INT */
					if ((insn & 0xc1c00000) == 0x01000000) /* %HI */
						set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
					else if ((insn & 0x80002000) == 0x80002000 &&
						 (insn & 0x01800000) != 0x01800000) /* %LO */
						set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x3ff));
					else {
						prom_printf(insn_i, p, addr, insn);
						prom_halt();
					}
					break;
				}
				count -= 2;
				q += 2;
			}
		} else
			p = q + count;
	}
#ifdef CONFIG_SMP
	flush_cacheall = (void (*)(void))BTFIXUPVAL_CALL(local_flush_cache_all);
#else
	flush_cacheall = (void (*)(void))BTFIXUPVAL_CALL(flush_cache_all);
#endif
	if (!flush_cacheall) {
		prom_printf(fca_und);
		prom_halt();
	}
	(*flush_cacheall)();
}
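
The 'h', 's', and 'a' fixups above all boil down to splicing an immediate into the imm22 field of a SETHI or the simm13 field of an OR. The standalone C sketch below (userspace, not kernel code; the two instruction encodings are hand-assembled examples, not taken from this commit) checks that the same masks btfixup() uses round-trip a 32-bit value through a %hi/%lo pair:

/* Sketch: how the 'h' and 's' fixups rewrite immediates. A SPARC SETHI
 * keeps the top 22 bits of a value in its low 22 bits; an OR-with-%lo
 * keeps the bottom 10 bits in its 13-bit simm13 field.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t value = 0xf0123456u;	/* value to patch in */
	uint32_t sethi = 0x03000000u;	/* sethi 0, %g1 (imm22 == 0) */
	uint32_t or_lo = 0x82106000u;	/* or %g1, 0, %g1 (simm13 == 0) */

	/* 'h' fixup: keep opcode + rd (top 10 bits), insert value >> 10 */
	uint32_t hi = (sethi & 0xffc00000u) | (value >> 10);
	/* 's'-style fixup: keep everything but simm13, insert the low bits */
	uint32_t lo = (or_lo & 0xffffe000u) | (value & 0x3ffu);

	/* sethi shifts imm22 left by 10; the or supplies the low 10 bits */
	uint32_t reassembled = ((hi & 0x3fffffu) << 10) | (lo & 0x3ffu);
	assert(reassembled == value);
	printf("patched %%hi/%%lo pair reassembles to 0x%08x\n",
	       (unsigned)reassembled);
	return 0;
}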
diff --git a/arch/sparc/mm/extable.c b/arch/sparc/mm/extable.c
new file mode 100644
index 000000000000..c9845c71f426
--- /dev/null
+++ b/arch/sparc/mm/extable.c
@@ -0,0 +1,77 @@
/*
 * linux/arch/sparc/mm/extable.c
 */

#include <linux/config.h>
#include <linux/module.h>
#include <asm/uaccess.h>

void sort_extable(struct exception_table_entry *start,
		  struct exception_table_entry *finish)
{
}

/* Caller knows they are in a range if ret->fixup == 0 */
const struct exception_table_entry *
search_extable(const struct exception_table_entry *start,
	       const struct exception_table_entry *last,
	       unsigned long value)
{
	const struct exception_table_entry *walk;

	/* Single insn entries are encoded as:
	 * word 1: insn address
	 * word 2: fixup code address
	 *
	 * Range entries are encoded as:
	 * word 1: first insn address
	 * word 2: 0
	 * word 3: last insn address + 4 bytes
	 * word 4: fixup code address
	 *
	 * See asm/uaccess.h for more details.
	 */

	/* 1. Try to find an exact match. */
	for (walk = start; walk <= last; walk++) {
		if (walk->fixup == 0) {
			/* A range entry, skip both parts. */
			walk++;
			continue;
		}

		if (walk->insn == value)
			return walk;
	}

	/* 2. Try to find a range match. */
	for (walk = start; walk <= (last - 1); walk++) {
		if (walk->fixup)
			continue;

		if (walk[0].insn <= value && walk[1].insn > value)
			return walk;

		walk++;
	}

	return NULL;
}

/* Special extable search, which handles ranges.  Returns fixup */
unsigned long search_extables_range(unsigned long addr, unsigned long *g2)
{
	const struct exception_table_entry *entry;

	entry = search_exception_tables(addr);
	if (!entry)
		return 0;

	/* Inside range?  Fix g2 and return correct fixup */
	if (!entry->fixup) {
		*g2 = (addr - entry->insn) / 4;
		return (entry + 1)->fixup;
	}

	return entry->fixup;
}
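
The entry layout that search_extable() documents is easiest to see with a concrete table. The sketch below is a userspace toy, not the kernel implementation (which goes through search_exception_tables()): a range entry spends two consecutive slots, the first half marked by fixup == 0, and a hit inside the range reports the instruction index the same way search_extables_range() computes g2. The addresses and fixup values are made up for the demo.

#include <stdio.h>

struct entry { unsigned long insn, fixup; };

static const struct entry table[] = {
	{ 0x1000, 0xf000 },	/* single insn at 0x1000 */
	{ 0x2000, 0 },		/* range entry: first insn, fixup == 0 */
	{ 0x2040, 0xf100 },	/*   second half: last insn + 4, fixup */
	{ 0x3000, 0xf200 },	/* single insn at 0x3000 */
};

static unsigned long lookup(unsigned long addr, unsigned long *g2)
{
	const int n = sizeof(table) / sizeof(table[0]);

	for (int i = 0; i < n; i++) {
		if (table[i].fixup == 0) {	/* range entry */
			if (table[i].insn <= addr && table[i + 1].insn > addr) {
				*g2 = (addr - table[i].insn) / 4; /* insn index */
				return table[i + 1].fixup;
			}
			i++;			/* skip the second half */
		} else if (table[i].insn == addr) {
			return table[i].fixup;
		}
	}
	return 0;
}

int main(void)
{
	unsigned long g2 = 0;
	/* 0x2008 is the third insn of the range: prints fixup f100, g2 2 */
	printf("fixup %lx g2 %lu\n", lookup(0x2008, &g2), g2);
	return 0;
}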
diff --git a/arch/sparc/mm/fault.c b/arch/sparc/mm/fault.c
new file mode 100644
index 000000000000..37f4107bae66
--- /dev/null
+++ b/arch/sparc/mm/fault.c
@@ -0,0 +1,596 @@
/* $Id: fault.c,v 1.122 2001/11/17 07:19:26 davem Exp $
 * fault.c:  Page fault handlers for the Sparc.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <asm/head.h>

#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include <asm/system.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/memreg.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/smp.h>
#include <asm/traps.h>
#include <asm/kdebug.h>
#include <asm/uaccess.h>

#define ELEMENTS(arr) (sizeof (arr)/sizeof (arr[0]))

extern int prom_node_root;

/* At boot time we determine these two values necessary for setting
 * up the segment maps and page table entries (pte's).
 */

int num_segmaps, num_contexts;
int invalid_segment;

/* various Virtual Address Cache parameters we find at boot time... */

int vac_size, vac_linesize, vac_do_hw_vac_flushes;
int vac_entries_per_context, vac_entries_per_segment;
int vac_entries_per_page;

/* Nice, simple, prom library does all the sweating for us. ;) */
int prom_probe_memory (void)
{
	register struct linux_mlist_v0 *mlist;
	register unsigned long bytes, base_paddr, tally;
	register int i;

	i = 0;
	mlist= *prom_meminfo()->v0_available;
	bytes = tally = mlist->num_bytes;
	base_paddr = (unsigned long) mlist->start_adr;

	sp_banks[0].base_addr = base_paddr;
	sp_banks[0].num_bytes = bytes;

	while (mlist->theres_more != (void *) 0){
		i++;
		mlist = mlist->theres_more;
		bytes = mlist->num_bytes;
		tally += bytes;
		if (i > SPARC_PHYS_BANKS-1) {
			printk ("The machine has more banks than "
				"this kernel can support\n"
				"Increase the SPARC_PHYS_BANKS "
				"setting (currently %d)\n",
				SPARC_PHYS_BANKS);
			i = SPARC_PHYS_BANKS-1;
			break;
		}

		sp_banks[i].base_addr = (unsigned long) mlist->start_adr;
		sp_banks[i].num_bytes = mlist->num_bytes;
	}

	i++;
	sp_banks[i].base_addr = 0xdeadbeef;
	sp_banks[i].num_bytes = 0;

	/* Now mask all bank sizes on a page boundary, it is all we can
	 * use anyways.
	 */
	for(i=0; sp_banks[i].num_bytes != 0; i++)
		sp_banks[i].num_bytes &= PAGE_MASK;

	return tally;
}

/* Traverse the memory lists in the prom to see how much physical we
 * have.
 */
unsigned long
probe_memory(void)
{
	int total;

	total = prom_probe_memory();

	/* Oh man, much nicer, keep the dirt in promlib. */
	return total;
}

extern void sun4c_complete_all_stores(void);

/* Whee, a level 15 NMI interrupt memory error.  Let's have fun... */
asmlinkage void sparc_lvl15_nmi(struct pt_regs *regs, unsigned long serr,
				unsigned long svaddr, unsigned long aerr,
				unsigned long avaddr)
{
	sun4c_complete_all_stores();
	printk("FAULT: NMI received\n");
	printk("SREGS: Synchronous Error %08lx\n", serr);
	printk("       Synchronous Vaddr %08lx\n", svaddr);
	printk("      Asynchronous Error %08lx\n", aerr);
	printk("      Asynchronous Vaddr %08lx\n", avaddr);
	if (sun4c_memerr_reg)
		printk("     Memory Parity Error %08lx\n", *sun4c_memerr_reg);
	printk("REGISTER DUMP:\n");
	show_regs(regs);
	prom_halt();
}

static void unhandled_fault(unsigned long, struct task_struct *,
		struct pt_regs *) __attribute__ ((noreturn));

static void unhandled_fault(unsigned long address, struct task_struct *tsk,
		struct pt_regs *regs)
{
	if((unsigned long) address < PAGE_SIZE) {
		printk(KERN_ALERT
		    "Unable to handle kernel NULL pointer dereference\n");
	} else {
		printk(KERN_ALERT "Unable to handle kernel paging request "
		       "at virtual address %08lx\n", address);
	}
	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
		(tsk->mm ? tsk->mm->context : tsk->active_mm->context));
	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
		(tsk->mm ? (unsigned long) tsk->mm->pgd :
		    (unsigned long) tsk->active_mm->pgd));
	die_if_kernel("Oops", regs);
}

asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
			    unsigned long address)
{
	struct pt_regs regs;
	unsigned long g2;
	unsigned int insn;
	int i;

	i = search_extables_range(ret_pc, &g2);
	switch (i) {
	case 3:
		/* load & store will be handled by fixup */
		return 3;

	case 1:
		/* store will be handled by fixup, load will bump out */
		/* for _to_ macros */
		insn = *((unsigned int *) pc);
		if ((insn >> 21) & 1)
			return 1;
		break;

	case 2:
		/* load will be handled by fixup, store will bump out */
		/* for _from_ macros */
		insn = *((unsigned int *) pc);
		if (!((insn >> 21) & 1) || ((insn>>19)&0x3f) == 15)
			return 2;
		break;

	default:
		break;
	};

	memset(&regs, 0, sizeof (regs));
	regs.pc = pc;
	regs.npc = pc + 4;
	__asm__ __volatile__(
		"rd %%psr, %0\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n" : "=r" (regs.psr));
	unhandled_fault(address, current, &regs);

	/* Not reached */
	return 0;
}

extern unsigned long safe_compute_effective_address(struct pt_regs *,
						    unsigned int);

static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
{
	unsigned int insn;

	if (text_fault)
		return regs->pc;

	if (regs->psr & PSR_PS) {
		insn = *(unsigned int *) regs->pc;
	} else {
		__get_user(insn, (unsigned int *) regs->pc);
	}

	return safe_compute_effective_address(regs, insn);
}

asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int fixup;
	unsigned long g2;
	siginfo_t info;
	int from_user = !(regs->psr & PSR_PS);

	if(text_fault)
		address = regs->pc;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (!ARCH_SUN4C_SUN4 && address >= TASK_SIZE)
		goto vmalloc_fault;

	info.si_code = SEGV_MAPERR;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	down_read(&mm->mmap_sem);

	/*
	 * The kernel referencing a bad kernel pointer can lock up
	 * a sun4c machine completely, so we must attempt recovery.
	 */
	if(!from_user && address >= PAGE_OFFSET)
		goto bad_area;

	vma = find_vma(mm, address);
	if(!vma)
		goto bad_area;
	if(vma->vm_start <= address)
		goto good_area;
	if(!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if(expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	info.si_code = SEGV_ACCERR;
	if(write) {
		if(!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		/* Allow reads even for write-only mappings */
		if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	switch (handle_mm_fault(mm, vma, address, write)) {
	case VM_FAULT_SIGBUS:
		goto do_sigbus;
	case VM_FAULT_OOM:
		goto out_of_memory;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_MINOR:
	default:
		current->min_flt++;
		break;
	}
	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if(from_user) {
#if 0
		printk("Fault whee %s [%d]: segfaults at %08lx pc=%08lx\n",
		       tsk->comm, tsk->pid, address, regs->pc);
#endif
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code set above to make clear whether
		   this was a SEGV_MAPERR or SEGV_ACCERR fault.  */
		info.si_addr = (void __user *)compute_si_addr(regs, text_fault);
		info.si_trapno = 0;
		force_sig_info (SIGSEGV, &info, tsk);
		return;
	}

	/* Is this in ex_table? */
no_context:
	g2 = regs->u_regs[UREG_G2];
	if (!from_user && (fixup = search_extables_range(regs->pc, &g2))) {
		if (fixup > 10) { /* Values below are reserved for other things */
			extern const unsigned __memset_start[];
			extern const unsigned __memset_end[];
			extern const unsigned __csum_partial_copy_start[];
			extern const unsigned __csum_partial_copy_end[];

#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%08lx> faddr<%08lx>\n", regs->pc, address);
			printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
				regs->pc, fixup, g2);
#endif
			if ((regs->pc >= (unsigned long)__memset_start &&
			     regs->pc < (unsigned long)__memset_end) ||
			    (regs->pc >= (unsigned long)__csum_partial_copy_start &&
			     regs->pc < (unsigned long)__csum_partial_copy_end)) {
				regs->u_regs[UREG_I4] = address;
				regs->u_regs[UREG_I5] = regs->pc;
			}
			regs->u_regs[UREG_G2] = g2;
			regs->pc = fixup;
			regs->npc = regs->pc + 4;
			return;
		}
	}

	unhandled_fault (address, tsk, regs);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	printk("VM: killing process %s\n", tsk->comm);
	if (from_user)
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *) compute_si_addr(regs, text_fault);
	info.si_trapno = 0;
	force_sig_info (SIGBUS, &info, tsk);
	if (!from_user)
		goto no_context;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;

		pgd = tsk->active_mm->pgd + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}

		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);

		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;
		*pmd = *pmd_k;
		return;
	}
}

asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	extern void sun4c_update_mmu_cache(struct vm_area_struct *,
					   unsigned long,pte_t);
	extern pte_t *sun4c_pte_offset_kernel(pmd_t *,unsigned long);
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	pgd_t *pgdp;
	pte_t *ptep;

	if (text_fault) {
		address = regs->pc;
	} else if (!write &&
		   !(regs->psr & PSR_PS)) {
		unsigned int insn, __user *ip;

		ip = (unsigned int __user *)regs->pc;
		if (!get_user(insn, ip)) {
			if ((insn & 0xc1680000) == 0xc0680000)
				write = 1;
		}
	}

	if (!mm) {
		/* We are oopsing. */
		do_sparc_fault(regs, text_fault, write, address);
		BUG();	/* P3 Oops already, you bitch */
	}

	pgdp = pgd_offset(mm, address);
	ptep = sun4c_pte_offset_kernel((pmd_t *) pgdp, address);

	if (pgd_val(*pgdp)) {
		if (write) {
			if ((pte_val(*ptep) & (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT))
					   == (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT)) {
				unsigned long flags;

				*ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
					      _SUN4C_PAGE_MODIFIED |
					      _SUN4C_PAGE_VALID |
					      _SUN4C_PAGE_DIRTY);

				local_irq_save(flags);
				if (sun4c_get_segmap(address) != invalid_segment) {
					sun4c_put_pte(address, pte_val(*ptep));
					local_irq_restore(flags);
					return;
				}
				local_irq_restore(flags);
			}
		} else {
			if ((pte_val(*ptep) & (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT))
					   == (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT)) {
				unsigned long flags;

				*ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
					      _SUN4C_PAGE_VALID);

				local_irq_save(flags);
				if (sun4c_get_segmap(address) != invalid_segment) {
					sun4c_put_pte(address, pte_val(*ptep));
					local_irq_restore(flags);
					return;
				}
				local_irq_restore(flags);
			}
		}
	}

	/* This conditional is 'interesting'. */
	if (pgd_val(*pgdp) && !(write && !(pte_val(*ptep) & _SUN4C_PAGE_WRITE))
	    && (pte_val(*ptep) & _SUN4C_PAGE_VALID))
		/* Note: It is safe to not grab the MMAP semaphore here because
		 *       we know that update_mmu_cache() will not sleep for
		 *       any reason (at least not in the current implementation)
		 *       and therefore there is no danger of another thread getting
		 *       on the CPU and doing a shrink_mmap() on this vma.
		 */
		sun4c_update_mmu_cache (find_vma(current->mm, address), address,
					*ptep);
	else
		do_sparc_fault(regs, text_fault, write, address);
}

/* This always deals with user addresses. */
inline void force_user_fault(unsigned long address, int write)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	siginfo_t info;

	info.si_code = SEGV_MAPERR;

#if 0
	printk("wf<pid=%d,wr=%d,addr=%08lx>\n",
	       tsk->pid, write, address);
#endif
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if(!vma)
		goto bad_area;
	if(vma->vm_start <= address)
		goto good_area;
	if(!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if(expand_stack(vma, address))
		goto bad_area;
good_area:
	info.si_code = SEGV_ACCERR;
	if(write) {
		if(!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	switch (handle_mm_fault(mm, vma, address, write)) {
	case VM_FAULT_SIGBUS:
	case VM_FAULT_OOM:
		goto do_sigbus;
	}
	up_read(&mm->mmap_sem);
	return;
bad_area:
	up_read(&mm->mmap_sem);
#if 0
	printk("Window whee %s [%d]: segfaults at %08lx\n",
	       tsk->comm, tsk->pid, address);
#endif
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	/* info.si_code set above to make clear whether
	   this was a SEGV_MAPERR or SEGV_ACCERR fault.  */
	info.si_addr = (void __user *) address;
	info.si_trapno = 0;
	force_sig_info (SIGSEGV, &info, tsk);
	return;

do_sigbus:
	up_read(&mm->mmap_sem);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *) address;
	info.si_trapno = 0;
	force_sig_info (SIGBUS, &info, tsk);
}

void window_overflow_fault(void)
{
	unsigned long sp;

	sp = current_thread_info()->rwbuf_stkptrs[0];
	if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 1);
	force_user_fault(sp, 1);
}

void window_underflow_fault(unsigned long sp)
{
	if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);
}

void window_ret_fault(struct pt_regs *regs)
{
	unsigned long sp;

	sp = regs->u_regs[UREG_FP];
	if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);
}
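
lookup_fault() above discriminates loads from stores by decoding the faulting instruction: for SPARC format-3 memory ops, op3 occupies bits 24..19, bit 21 (the 0x4 bit of op3) is set for stores and clear for loads, and op3 == 15 (0x0f) is swap, which both reads and writes. A userspace sketch of that test; the two encodings below are hand-assembled illustrations, not anything generated by this code:

#include <stdint.h>
#include <stdio.h>

static const char *classify(uint32_t insn)
{
	unsigned op3 = (insn >> 19) & 0x3f;

	if (op3 == 0x0f)
		return "swap (load+store)";	/* special-cased in case 2 */
	return ((insn >> 21) & 1) ? "store" : "load";
}

int main(void)
{
	uint32_t ld_insn = 0xc2002000;	/* ld  [%g0 + 0], %g1 */
	uint32_t st_insn = 0xc2202000;	/* st  %g1, [%g0 + 0] */

	printf("%08x: %s\n", (unsigned)ld_insn, classify(ld_insn));
	printf("%08x: %s\n", (unsigned)st_insn, classify(st_insn));
	return 0;
}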
diff --git a/arch/sparc/mm/generic.c b/arch/sparc/mm/generic.c
new file mode 100644
index 000000000000..db27eee3bda1
--- /dev/null
+++ b/arch/sparc/mm/generic.c
@@ -0,0 +1,154 @@
/* $Id: generic.c,v 1.14 2001/12/21 04:56:15 davem Exp $
 * generic.c: Generic Sparc mm routines that are not dependent upon
 *            MMU type but are Sparc specific.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

static inline void forget_pte(pte_t page)
{
#if 0 /* old 2.4 code */
	if (pte_none(page))
		return;
	if (pte_present(page)) {
		unsigned long pfn = pte_pfn(page);
		struct page *ptpage;
		if (!pfn_valid(pfn))
			return;
		ptpage = pfn_to_page(pfn);
		if (PageReserved(ptpage))
			return;
		page_cache_release(ptpage);
		return;
	}
	swap_free(pte_to_swp_entry(page));
#else
	if (!pte_none(page)) {
		printk("forget_pte: old mapping existed!\n");
		BUG();
	}
#endif
}

/* Remap IO memory, the same way as remap_pfn_range(), but use
 * the obio memory space.
 *
 * They use a pgprot that sets PAGE_IO and does not check the
 * mem_map table as this is independent of normal memory.
 */
static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte, unsigned long address, unsigned long size,
	unsigned long offset, pgprot_t prot, int space)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t oldpage = *pte;
		pte_clear(mm, address, pte);
		set_pte(pte, mk_pte_io(offset, prot, space));
		forget_pte(oldpage);
		address += PAGE_SIZE;
		offset += PAGE_SIZE;
		pte++;
	} while (address < end);
}

static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
	unsigned long offset, pgprot_t prot, int space)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	offset -= address;
	do {
		pte_t * pte = pte_alloc_map(mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
	return 0;
}

int io_remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long offset, unsigned long size, pgprot_t prot, int space)
{
	int error = 0;
	pgd_t * dir;
	unsigned long beg = from;
	unsigned long end = from + size;
	struct mm_struct *mm = vma->vm_mm;

	prot = __pgprot(pg_iobits);
	offset -= from;
	dir = pgd_offset(mm, from);
	flush_cache_range(vma, beg, end);

	spin_lock(&mm->page_table_lock);
	while (from < end) {
		pmd_t *pmd = pmd_alloc(current->mm, dir, from);
		error = -ENOMEM;
		if (!pmd)
			break;
		error = io_remap_pmd_range(mm, pmd, from, end - from, offset + from, prot, space);
		if (error)
			break;
		from = (from + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	}
	spin_unlock(&mm->page_table_lock);

	flush_tlb_range(vma, beg, end);
	return error;
}

int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
			unsigned long pfn, unsigned long size, pgprot_t prot)
{
	int error = 0;
	pgd_t * dir;
	unsigned long beg = from;
	unsigned long end = from + size;
	struct mm_struct *mm = vma->vm_mm;
	int space = GET_IOSPACE(pfn);
	unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;

	prot = __pgprot(pg_iobits);
	offset -= from;
	dir = pgd_offset(mm, from);
	flush_cache_range(vma, beg, end);

	spin_lock(&mm->page_table_lock);
	while (from < end) {
		pmd_t *pmd = pmd_alloc(current->mm, dir, from);
		error = -ENOMEM;
		if (!pmd)
			break;
		error = io_remap_pmd_range(mm, pmd, from, end - from, offset + from, prot, space);
		if (error)
			break;
		from = (from + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	}
	spin_unlock(&mm->page_table_lock);

	flush_tlb_range(vma, beg, end);
	return error;
}
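
The walk in io_remap_page_range() advances in PGDIR_SIZE steps while each inner level clamps its end to the current entry's boundary, so a region that straddles entries is carved into per-entry runs. Below is a userspace sketch of the same clamping arithmetic for a single level, with toy sizes chosen for the demo rather than the SRMMU's real geometry:

#include <stdio.h>

#define PAGE_SIZE 0x1000UL
#define PMD_SIZE  0x10000UL		/* toy: 16 pages per pmd entry */
#define PMD_MASK  (~(PMD_SIZE - 1))

static void walk_pte_range(unsigned long address, unsigned long size)
{
	unsigned long end;

	address &= ~PMD_MASK;		/* offset inside this pmd entry */
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;		/* clamp to the entry boundary */
	printf("  pte run: %#lx..%#lx (%lu pages)\n",
	       address, end, (end - address) / PAGE_SIZE);
}

int main(void)
{
	unsigned long from = 0xff8000UL, size = 0x30000UL;
	unsigned long end = from + size;

	while (from < end) {
		walk_pte_range(from, end - from);
		/* same stepping as the outer loops: jump to next boundary */
		from = (from + PMD_SIZE) & PMD_MASK;
	}
	/* prints runs of 8, 16, 16, 8 pages = 0x30000 bytes total */
	return 0;
}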
diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c
new file mode 100644
index 000000000000..4d8ed9c65182
--- /dev/null
+++ b/arch/sparc/mm/highmem.c
@@ -0,0 +1,120 @@
/*
 * highmem.c: virtual kernel memory mappings for high memory
 *
 * Provides kernel-static versions of atomic kmap functions originally
 * found as inlines in include/asm-sparc/highmem.h.  These became
 * needed as kmap_atomic() and kunmap_atomic() started getting
 * called from within modules.
 * -- Tomas Szepe <szepe@pinerecords.com>, September 2002
 *
 * But kmap_atomic() and kunmap_atomic() cannot be inlined in
 * modules because they are loaded with btfixup-ped functions.
 */

/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface.  But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need it.
 *
 * XXX This is an old text. Actually, it's good to use atomic kmaps,
 * provided you remember that they are atomic and not try to sleep
 * with a kmap taken, much like a spinlock. Non-atomic kmaps are
 * shared by CPUs, and so precious, and establishing them requires IPI.
 * Atomic kmaps are lightweight and we may have NCPUS more of them.
 */
#include <linux/mm.h>
#include <linux/highmem.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>

void *kmap_atomic(struct page *page, enum km_type type)
{
	unsigned long idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	inc_preempt_count();
	if (!PageHighMem(page))
		return page_address(page);

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

/* XXX Fix - Anton */
#if 0
	__flush_cache_one(vaddr);
#else
	flush_cache_all();
#endif

#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
/* XXX Fix - Anton */
#if 0
	__flush_tlb_one(vaddr);
#else
	flush_tlb_all();
#endif

	return (void*) vaddr;
}

void kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	unsigned long idx = type + KM_TYPE_NR*smp_processor_id();

	if (vaddr < FIXADDR_START) { // FIXME
		dec_preempt_count();
		preempt_check_resched();
		return;
	}

	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));

/* XXX Fix - Anton */
#if 0
	__flush_cache_one(vaddr);
#else
	flush_cache_all();
#endif

	/*
	 * force other mappings to Oops if they'll try to access
	 * this pte without first remap it
	 */
	pte_clear(&init_mm, vaddr, kmap_pte-idx);
/* XXX Fix - Anton */
#if 0
	__flush_tlb_one(vaddr);
#else
	flush_tlb_all();
#endif
#endif

	dec_preempt_count();
	preempt_check_resched();
}

/* We may be fed a pagetable here by ptep_to_xxx and others. */
struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < SRMMU_NOCACHE_VADDR)
		return virt_to_page(ptr);
	if (vaddr < PKMAP_BASE)
		return pfn_to_page(__nocache_pa(vaddr) >> PAGE_SHIFT);
	BUG_ON(vaddr < FIXADDR_START);
	BUG_ON(vaddr > FIXADDR_TOP);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
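
kmap_atomic() above avoids locking by giving every (cpu, type) pair its own fixmap slot: idx = type + KM_TYPE_NR * smp_processor_id() can never collide across CPUs or kmap types. A userspace sketch of just that slot arithmetic; the top-down fix_to_virt() layout and all constants here are assumptions for the demo, not sparc32's actual values:

#include <stdio.h>

#define PAGE_SHIFT  12
#define KM_TYPE_NR  6			/* toy number of kmap types */
#define FIXADDR_TOP 0xfffff000UL	/* assumed top of the fixmap area */

static unsigned long fix_to_virt(unsigned long idx)
{
	/* one common convention: slots grow downward from the top */
	return FIXADDR_TOP - (idx << PAGE_SHIFT);
}

int main(void)
{
	for (int cpu = 0; cpu < 2; cpu++)
		for (int type = 0; type < 2; type++) {
			unsigned long idx = type + KM_TYPE_NR * (unsigned)cpu;
			printf("cpu %d type %d -> slot %lu at %#lx\n",
			       cpu, type, idx, fix_to_virt(idx));
		}
	return 0;
}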
diff --git a/arch/sparc/mm/hypersparc.S b/arch/sparc/mm/hypersparc.S new file mode 100644 index 000000000000..54b8e764b042 --- /dev/null +++ b/arch/sparc/mm/hypersparc.S | |||
@@ -0,0 +1,413 @@ | |||
1 | /* $Id: hypersparc.S,v 1.18 2001/12/21 04:56:15 davem Exp $ | ||
2 | * hypersparc.S: High speed Hypersparc mmu/cache operations. | ||
3 | * | ||
4 | * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) | ||
5 | */ | ||
6 | |||
7 | #include <asm/ptrace.h> | ||
8 | #include <asm/psr.h> | ||
9 | #include <asm/asm_offsets.h> | ||
10 | #include <asm/asi.h> | ||
11 | #include <asm/page.h> | ||
12 | #include <asm/pgtsrmmu.h> | ||
13 | #include <linux/config.h> | ||
14 | #include <linux/init.h> | ||
15 | |||
16 | .text | ||
17 | .align 4 | ||
18 | |||
19 | .globl hypersparc_flush_cache_all, hypersparc_flush_cache_mm | ||
20 | .globl hypersparc_flush_cache_range, hypersparc_flush_cache_page | ||
21 | .globl hypersparc_flush_page_to_ram | ||
22 | .globl hypersparc_flush_page_for_dma, hypersparc_flush_sig_insns | ||
23 | .globl hypersparc_flush_tlb_all, hypersparc_flush_tlb_mm | ||
24 | .globl hypersparc_flush_tlb_range, hypersparc_flush_tlb_page | ||
25 | |||
26 | hypersparc_flush_cache_all: | ||
27 | WINDOW_FLUSH(%g4, %g5) | ||
28 | sethi %hi(vac_cache_size), %g4 | ||
29 | ld [%g4 + %lo(vac_cache_size)], %g5 | ||
30 | sethi %hi(vac_line_size), %g1 | ||
31 | ld [%g1 + %lo(vac_line_size)], %g2 | ||
32 | 1: | ||
33 | subcc %g5, %g2, %g5 ! hyper_flush_unconditional_combined | ||
34 | bne 1b | ||
35 | sta %g0, [%g5] ASI_M_FLUSH_CTX | ||
36 | retl | ||
37 | sta %g0, [%g0] ASI_M_FLUSH_IWHOLE ! hyper_flush_whole_icache | ||
38 | |||
39 | /* We expand the window flush to get maximum performance. */ | ||
40 | hypersparc_flush_cache_mm: | ||
41 | #ifndef CONFIG_SMP | ||
42 | ld [%o0 + AOFF_mm_context], %g1 | ||
43 | cmp %g1, -1 | ||
44 | be hypersparc_flush_cache_mm_out | ||
45 | #endif | ||
46 | WINDOW_FLUSH(%g4, %g5) | ||
47 | |||
48 | sethi %hi(vac_line_size), %g1 | ||
49 | ld [%g1 + %lo(vac_line_size)], %o1 | ||
50 | sethi %hi(vac_cache_size), %g2 | ||
51 | ld [%g2 + %lo(vac_cache_size)], %o0 | ||
52 | add %o1, %o1, %g1 | ||
53 | add %o1, %g1, %g2 | ||
54 | add %o1, %g2, %g3 | ||
55 | add %o1, %g3, %g4 | ||
56 | add %o1, %g4, %g5 | ||
57 | add %o1, %g5, %o4 | ||
58 | add %o1, %o4, %o5 | ||
59 | |||
60 | /* BLAMMO! */ | ||
61 | 1: | ||
62 | subcc %o0, %o5, %o0 ! hyper_flush_cache_user | ||
63 | sta %g0, [%o0 + %g0] ASI_M_FLUSH_USER | ||
64 | sta %g0, [%o0 + %o1] ASI_M_FLUSH_USER | ||
65 | sta %g0, [%o0 + %g1] ASI_M_FLUSH_USER | ||
66 | sta %g0, [%o0 + %g2] ASI_M_FLUSH_USER | ||
67 | sta %g0, [%o0 + %g3] ASI_M_FLUSH_USER | ||
68 | sta %g0, [%o0 + %g4] ASI_M_FLUSH_USER | ||
69 | sta %g0, [%o0 + %g5] ASI_M_FLUSH_USER | ||
70 | bne 1b | ||
71 | sta %g0, [%o0 + %o4] ASI_M_FLUSH_USER | ||
72 | hypersparc_flush_cache_mm_out: | ||
73 | retl | ||
74 | nop | ||
75 | |||
76 | /* The things we do for performance... */ | ||
77 | hypersparc_flush_cache_range: | ||
78 | ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */ | ||
79 | #ifndef CONFIG_SMP | ||
80 | ld [%o0 + AOFF_mm_context], %g1 | ||
81 | cmp %g1, -1 | ||
82 | be hypersparc_flush_cache_range_out | ||
83 | #endif | ||
84 | WINDOW_FLUSH(%g4, %g5) | ||
85 | |||
86 | sethi %hi(vac_line_size), %g1 | ||
87 | ld [%g1 + %lo(vac_line_size)], %o4 | ||
88 | sethi %hi(vac_cache_size), %g2 | ||
89 | ld [%g2 + %lo(vac_cache_size)], %o3 | ||
90 | |||
91 | /* Here comes the fun part... */ | ||
92 | add %o2, (PAGE_SIZE - 1), %o2 | ||
93 | andn %o1, (PAGE_SIZE - 1), %o1 | ||
94 | add %o4, %o4, %o5 | ||
95 | andn %o2, (PAGE_SIZE - 1), %o2 | ||
96 | add %o4, %o5, %g1 | ||
97 | sub %o2, %o1, %g4 | ||
98 | add %o4, %g1, %g2 | ||
99 | sll %o3, 2, %g5 | ||
100 | add %o4, %g2, %g3 | ||
101 | cmp %g4, %g5 | ||
102 | add %o4, %g3, %g4 | ||
103 | blu 0f | ||
104 | add %o4, %g4, %g5 | ||
105 | add %o4, %g5, %g7 | ||
106 | |||
107 | /* Flush entire user space, believe it or not this is quicker | ||
108 | * than page at a time flushings for range > (cache_size<<2). | ||
109 | */ | ||
110 | 1: | ||
111 | subcc %o3, %g7, %o3 | ||
112 | sta %g0, [%o3 + %g0] ASI_M_FLUSH_USER | ||
113 | sta %g0, [%o3 + %o4] ASI_M_FLUSH_USER | ||
114 | sta %g0, [%o3 + %o5] ASI_M_FLUSH_USER | ||
115 | sta %g0, [%o3 + %g1] ASI_M_FLUSH_USER | ||
116 | sta %g0, [%o3 + %g2] ASI_M_FLUSH_USER | ||
117 | sta %g0, [%o3 + %g3] ASI_M_FLUSH_USER | ||
118 | sta %g0, [%o3 + %g4] ASI_M_FLUSH_USER | ||
119 | bne 1b | ||
120 | sta %g0, [%o3 + %g5] ASI_M_FLUSH_USER | ||
121 | retl | ||
122 | nop | ||
123 | |||
124 | /* Below our threshold, flush one page at a time. */ | ||
125 | 0: | ||
126 | ld [%o0 + AOFF_mm_context], %o0 | ||
127 | mov SRMMU_CTX_REG, %g7 | ||
128 | lda [%g7] ASI_M_MMUREGS, %o3 | ||
129 | sta %o0, [%g7] ASI_M_MMUREGS | ||
130 | add %o2, -PAGE_SIZE, %o0 | ||
131 | 1: | ||
132 | or %o0, 0x400, %g7 | ||
133 | lda [%g7] ASI_M_FLUSH_PROBE, %g7 | ||
134 | orcc %g7, 0, %g0 | ||
135 | be,a 3f | ||
136 | mov %o0, %o2 | ||
137 | add %o4, %g5, %g7 | ||
138 | 2: | ||
139 | sub %o2, %g7, %o2 | ||
140 | sta %g0, [%o2 + %g0] ASI_M_FLUSH_PAGE | ||
141 | sta %g0, [%o2 + %o4] ASI_M_FLUSH_PAGE | ||
142 | sta %g0, [%o2 + %o5] ASI_M_FLUSH_PAGE | ||
143 | sta %g0, [%o2 + %g1] ASI_M_FLUSH_PAGE | ||
144 | sta %g0, [%o2 + %g2] ASI_M_FLUSH_PAGE | ||
145 | sta %g0, [%o2 + %g3] ASI_M_FLUSH_PAGE | ||
146 | andcc %o2, 0xffc, %g0 | ||
147 | sta %g0, [%o2 + %g4] ASI_M_FLUSH_PAGE | ||
148 | bne 2b | ||
149 | sta %g0, [%o2 + %g5] ASI_M_FLUSH_PAGE | ||
150 | 3: | ||
151 | cmp %o2, %o1 | ||
152 | bne 1b | ||
153 | add %o2, -PAGE_SIZE, %o0 | ||
154 | mov SRMMU_FAULT_STATUS, %g5 | ||
155 | lda [%g5] ASI_M_MMUREGS, %g0 | ||
156 | mov SRMMU_CTX_REG, %g7 | ||
157 | sta %o3, [%g7] ASI_M_MMUREGS | ||
158 | hypersparc_flush_cache_range_out: | ||
159 | retl | ||
160 | nop | ||
161 | |||
162 | /* HyperSparc requires a valid mapping where we are about to flush | ||
163 | * in order to check for a physical tag match during the flush. | ||
164 | */ | ||
165 | /* Verified, my ass... */ | ||
166 | hypersparc_flush_cache_page: | ||
167 | ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */ | ||
168 | ld [%o0 + AOFF_mm_context], %g2 | ||
169 | #ifndef CONFIG_SMP | ||
170 | cmp %g2, -1 | ||
171 | be hypersparc_flush_cache_page_out | ||
172 | #endif | ||
173 | WINDOW_FLUSH(%g4, %g5) | ||
174 | |||
175 | sethi %hi(vac_line_size), %g1 | ||
176 | ld [%g1 + %lo(vac_line_size)], %o4 | ||
177 | mov SRMMU_CTX_REG, %o3 | ||
178 | andn %o1, (PAGE_SIZE - 1), %o1 | ||
179 | lda [%o3] ASI_M_MMUREGS, %o2 | ||
180 | sta %g2, [%o3] ASI_M_MMUREGS | ||
181 | or %o1, 0x400, %o5 | ||
182 | lda [%o5] ASI_M_FLUSH_PROBE, %g1 | ||
183 | orcc %g0, %g1, %g0 | ||
184 | be 2f | ||
185 | add %o4, %o4, %o5 | ||
186 | sub %o1, -PAGE_SIZE, %o1 | ||
187 | add %o4, %o5, %g1 | ||
188 | add %o4, %g1, %g2 | ||
189 | add %o4, %g2, %g3 | ||
190 | add %o4, %g3, %g4 | ||
191 | add %o4, %g4, %g5 | ||
192 | add %o4, %g5, %g7 | ||
193 | |||
194 | /* BLAMMO! */ | ||
195 | 1: | ||
196 | sub %o1, %g7, %o1 | ||
197 | sta %g0, [%o1 + %g0] ASI_M_FLUSH_PAGE | ||
198 | sta %g0, [%o1 + %o4] ASI_M_FLUSH_PAGE | ||
199 | sta %g0, [%o1 + %o5] ASI_M_FLUSH_PAGE | ||
200 | sta %g0, [%o1 + %g1] ASI_M_FLUSH_PAGE | ||
201 | sta %g0, [%o1 + %g2] ASI_M_FLUSH_PAGE | ||
202 | sta %g0, [%o1 + %g3] ASI_M_FLUSH_PAGE | ||
203 | andcc %o1, 0xffc, %g0 | ||
204 | sta %g0, [%o1 + %g4] ASI_M_FLUSH_PAGE | ||
205 | bne 1b | ||
206 | sta %g0, [%o1 + %g5] ASI_M_FLUSH_PAGE | ||
207 | 2: | ||
208 | mov SRMMU_FAULT_STATUS, %g7 | ||
209 | mov SRMMU_CTX_REG, %g4 | ||
210 | lda [%g7] ASI_M_MMUREGS, %g0 | ||
211 | sta %o2, [%g4] ASI_M_MMUREGS | ||
212 | hypersparc_flush_cache_page_out: | ||
213 | retl | ||
214 | nop | ||
215 | |||
216 | hypersparc_flush_sig_insns: | ||
217 | flush %o1 | ||
218 | retl | ||
219 | flush %o1 + 4 | ||
220 | |||
221 | /* HyperSparc is copy-back. */ | ||
222 | hypersparc_flush_page_to_ram: | ||
223 | sethi %hi(vac_line_size), %g1 | ||
224 | ld [%g1 + %lo(vac_line_size)], %o4 | ||
225 | andn %o0, (PAGE_SIZE - 1), %o0 | ||
226 | add %o4, %o4, %o5 | ||
227 | or %o0, 0x400, %g7 | ||
228 | lda [%g7] ASI_M_FLUSH_PROBE, %g5 | ||
229 | add %o4, %o5, %g1 | ||
230 | orcc %g5, 0, %g0 | ||
231 | be 2f | ||
232 | add %o4, %g1, %g2 | ||
233 | add %o4, %g2, %g3 | ||
234 | sub %o0, -PAGE_SIZE, %o0 | ||
235 | add %o4, %g3, %g4 | ||
236 | add %o4, %g4, %g5 | ||
237 | add %o4, %g5, %g7 | ||
238 | |||
239 | /* BLAMMO! */ | ||
240 | 1: | ||
241 | sub %o0, %g7, %o0 | ||
242 | sta %g0, [%o0 + %g0] ASI_M_FLUSH_PAGE | ||
243 | sta %g0, [%o0 + %o4] ASI_M_FLUSH_PAGE | ||
244 | sta %g0, [%o0 + %o5] ASI_M_FLUSH_PAGE | ||
245 | sta %g0, [%o0 + %g1] ASI_M_FLUSH_PAGE | ||
246 | sta %g0, [%o0 + %g2] ASI_M_FLUSH_PAGE | ||
247 | sta %g0, [%o0 + %g3] ASI_M_FLUSH_PAGE | ||
248 | andcc %o0, 0xffc, %g0 | ||
249 | sta %g0, [%o0 + %g4] ASI_M_FLUSH_PAGE | ||
250 | bne 1b | ||
251 | sta %g0, [%o0 + %g5] ASI_M_FLUSH_PAGE | ||
252 | 2: | ||
253 | mov SRMMU_FAULT_STATUS, %g1 | ||
254 | retl | ||
255 | lda [%g1] ASI_M_MMUREGS, %g0 | ||
256 | |||
257 | /* HyperSparc is IO cache coherent. */ | ||
258 | hypersparc_flush_page_for_dma: | ||
259 | retl | ||
260 | nop | ||
261 | |||
262 | /* It was noted that at boot time a TLB flush all in a delay slot | ||
263 | * can deliver an illegal instruction to the processor if the timing | ||
264 | * is just right... | ||
265 | */ | ||
266 | hypersparc_flush_tlb_all: | ||
267 | mov 0x400, %g1 | ||
268 | sta %g0, [%g1] ASI_M_FLUSH_PROBE | ||
269 | retl | ||
270 | nop | ||
271 | |||
272 | hypersparc_flush_tlb_mm: | ||
273 | mov SRMMU_CTX_REG, %g1 | ||
274 | ld [%o0 + AOFF_mm_context], %o1 | ||
275 | lda [%g1] ASI_M_MMUREGS, %g5 | ||
276 | #ifndef CONFIG_SMP | ||
277 | cmp %o1, -1 | ||
278 | be hypersparc_flush_tlb_mm_out | ||
279 | #endif | ||
280 | mov 0x300, %g2 | ||
281 | sta %o1, [%g1] ASI_M_MMUREGS | ||
282 | sta %g0, [%g2] ASI_M_FLUSH_PROBE | ||
283 | hypersparc_flush_tlb_mm_out: | ||
284 | retl | ||
285 | sta %g5, [%g1] ASI_M_MMUREGS | ||
286 | |||
287 | hypersparc_flush_tlb_range: | ||
288 | ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ | ||
289 | mov SRMMU_CTX_REG, %g1 | ||
290 | ld [%o0 + AOFF_mm_context], %o3 | ||
291 | lda [%g1] ASI_M_MMUREGS, %g5 | ||
292 | #ifndef CONFIG_SMP | ||
293 | cmp %o3, -1 | ||
294 | be hypersparc_flush_tlb_range_out | ||
295 | #endif | ||
296 | sethi %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4 | ||
297 | sta %o3, [%g1] ASI_M_MMUREGS | ||
298 | and %o1, %o4, %o1 | ||
299 | add %o1, 0x200, %o1 | ||
300 | sta %g0, [%o1] ASI_M_FLUSH_PROBE | ||
301 | 1: | ||
302 | sub %o1, %o4, %o1 | ||
303 | cmp %o1, %o2 | ||
304 | blu,a 1b | ||
305 | sta %g0, [%o1] ASI_M_FLUSH_PROBE | ||
306 | hypersparc_flush_tlb_range_out: | ||
307 | retl | ||
308 | sta %g5, [%g1] ASI_M_MMUREGS | ||
309 | |||
310 | hypersparc_flush_tlb_page: | ||
311 | ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ | ||
312 | mov SRMMU_CTX_REG, %g1 | ||
313 | ld [%o0 + AOFF_mm_context], %o3 | ||
314 | andn %o1, (PAGE_SIZE - 1), %o1 | ||
315 | #ifndef CONFIG_SMP | ||
316 | cmp %o3, -1 | ||
317 | be hypersparc_flush_tlb_page_out | ||
318 | #endif | ||
319 | lda [%g1] ASI_M_MMUREGS, %g5 | ||
320 | sta %o3, [%g1] ASI_M_MMUREGS | ||
321 | sta %g0, [%o1] ASI_M_FLUSH_PROBE | ||
322 | hypersparc_flush_tlb_page_out: | ||
323 | retl | ||
324 | sta %g5, [%g1] ASI_M_MMUREGS | ||
325 | |||
326 | __INIT | ||
327 | |||
328 | /* High speed page clear/copy. */ | ||
329 | hypersparc_bzero_1page: | ||
330 | /* NOTE: This routine has to be shorter than 40 insns --jj */ | ||
331 | clr %g1 | ||
332 | mov 32, %g2 | ||
333 | mov 64, %g3 | ||
334 | mov 96, %g4 | ||
335 | mov 128, %g5 | ||
336 | mov 160, %g7 | ||
337 | mov 192, %o2 | ||
338 | mov 224, %o3 | ||
339 | mov 16, %o1 | ||
340 | 1: | ||
341 | stda %g0, [%o0 + %g0] ASI_M_BFILL | ||
342 | stda %g0, [%o0 + %g2] ASI_M_BFILL | ||
343 | stda %g0, [%o0 + %g3] ASI_M_BFILL | ||
344 | stda %g0, [%o0 + %g4] ASI_M_BFILL | ||
345 | stda %g0, [%o0 + %g5] ASI_M_BFILL | ||
346 | stda %g0, [%o0 + %g7] ASI_M_BFILL | ||
347 | stda %g0, [%o0 + %o2] ASI_M_BFILL | ||
348 | stda %g0, [%o0 + %o3] ASI_M_BFILL | ||
349 | subcc %o1, 1, %o1 | ||
350 | bne 1b | ||
351 | add %o0, 256, %o0 | ||
352 | |||
353 | retl | ||
354 | nop | ||
355 | |||
356 | hypersparc_copy_1page: | ||
357 | /* NOTE: This routine has to be shorter than 70 insns --jj */ | ||
358 | sub %o1, %o0, %o2 ! difference | ||
359 | mov 16, %g1 | ||
360 | 1: | ||
361 | sta %o0, [%o0 + %o2] ASI_M_BCOPY | ||
362 | add %o0, 32, %o0 | ||
363 | sta %o0, [%o0 + %o2] ASI_M_BCOPY | ||
364 | add %o0, 32, %o0 | ||
365 | sta %o0, [%o0 + %o2] ASI_M_BCOPY | ||
366 | add %o0, 32, %o0 | ||
367 | sta %o0, [%o0 + %o2] ASI_M_BCOPY | ||
368 | add %o0, 32, %o0 | ||
369 | sta %o0, [%o0 + %o2] ASI_M_BCOPY | ||
370 | add %o0, 32, %o0 | ||
371 | sta %o0, [%o0 + %o2] ASI_M_BCOPY | ||
372 | add %o0, 32, %o0 | ||
373 | sta %o0, [%o0 + %o2] ASI_M_BCOPY | ||
374 | add %o0, 32, %o0 | ||
375 | sta %o0, [%o0 + %o2] ASI_M_BCOPY | ||
376 | subcc %g1, 1, %g1 | ||
377 | bne 1b | ||
378 | add %o0, 32, %o0 | ||
379 | |||
380 | retl | ||
381 | nop | ||
382 | |||
383 | .globl hypersparc_setup_blockops | ||
384 | hypersparc_setup_blockops: | ||
385 | sethi %hi(bzero_1page), %o0 | ||
386 | or %o0, %lo(bzero_1page), %o0 | ||
387 | sethi %hi(hypersparc_bzero_1page), %o1 | ||
388 | or %o1, %lo(hypersparc_bzero_1page), %o1 | ||
389 | sethi %hi(hypersparc_copy_1page), %o2 | ||
390 | or %o2, %lo(hypersparc_copy_1page), %o2 | ||
391 | ld [%o1], %o4 | ||
392 | 1: | ||
393 | add %o1, 4, %o1 | ||
394 | st %o4, [%o0] | ||
395 | add %o0, 4, %o0 | ||
396 | cmp %o1, %o2 | ||
397 | bne 1b | ||
398 | ld [%o1], %o4 | ||
399 | sethi %hi(__copy_1page), %o0 | ||
400 | or %o0, %lo(__copy_1page), %o0 | ||
401 | sethi %hi(hypersparc_setup_blockops), %o2 | ||
402 | or %o2, %lo(hypersparc_setup_blockops), %o2 | ||
403 | ld [%o1], %o4 | ||
404 | 1: | ||
405 | add %o1, 4, %o1 | ||
406 | st %o4, [%o0] | ||
407 | add %o0, 4, %o0 | ||
408 | cmp %o1, %o2 | ||
409 | bne 1b | ||
410 | ld [%o1], %o4 | ||
411 | sta %g0, [%g0] ASI_M_FLUSH_IWHOLE | ||
412 | retl | ||
413 | nop | ||
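hypersparc_setup_blockops above patches the generic page primitives in place: the instruction words between hypersparc_bzero_1page and hypersparc_copy_1page are copied over bzero_1page, the words from there up to hypersparc_setup_blockops over __copy_1page, and the closing ASI_M_FLUSH_IWHOLE store flushes the whole instruction cache so the patched code is what gets fetched. Roughly, in C, as an editor's sketch (not kernel source; flush_icache_all() stands in for the ASI store, and the 40/70-instruction notes above presumably bound the size of the destination slots):

void setup_blockops_sketch(void)
{
	unsigned int *dst, *src, *end;

	dst = (unsigned int *)bzero_1page;		/* destination slot */
	src = (unsigned int *)hypersparc_bzero_1page;
	end = (unsigned int *)hypersparc_copy_1page;
	while (src != end)				/* one insn word at a time */
		*dst++ = *src++;

	dst = (unsigned int *)__copy_1page;		/* src already at copy_1page */
	end = (unsigned int *)hypersparc_setup_blockops;
	while (src != end)
		*dst++ = *src++;

	flush_icache_all();				/* hypothetical stand-in */
}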
diff --git a/arch/sparc/mm/init.c b/arch/sparc/mm/init.c new file mode 100644 index 000000000000..a2dea69b2f07 --- /dev/null +++ b/arch/sparc/mm/init.c | |||
@@ -0,0 +1,515 @@ | |||
1 | /* $Id: init.c,v 1.103 2001/11/19 19:03:08 davem Exp $ | ||
2 | * linux/arch/sparc/mm/init.c | ||
3 | * | ||
4 | * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) | ||
5 | * Copyright (C) 1995 Eddie C. Dost (ecd@skynet.be) | ||
6 | * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | ||
7 | * Copyright (C) 2000 Anton Blanchard (anton@samba.org) | ||
8 | */ | ||
9 | |||
10 | #include <linux/config.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/signal.h> | ||
13 | #include <linux/sched.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/errno.h> | ||
16 | #include <linux/string.h> | ||
17 | #include <linux/types.h> | ||
18 | #include <linux/ptrace.h> | ||
19 | #include <linux/mman.h> | ||
20 | #include <linux/mm.h> | ||
21 | #include <linux/swap.h> | ||
22 | #include <linux/initrd.h> | ||
23 | #include <linux/init.h> | ||
24 | #include <linux/highmem.h> | ||
25 | #include <linux/bootmem.h> | ||
26 | |||
27 | #include <asm/system.h> | ||
28 | #include <asm/segment.h> | ||
29 | #include <asm/vac-ops.h> | ||
30 | #include <asm/page.h> | ||
31 | #include <asm/pgtable.h> | ||
32 | #include <asm/vaddrs.h> | ||
33 | #include <asm/pgalloc.h> /* bug in asm-generic/tlb.h: check_pgt_cache */ | ||
34 | #include <asm/tlb.h> | ||
35 | |||
36 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); | ||
37 | |||
38 | unsigned long *sparc_valid_addr_bitmap; | ||
39 | |||
40 | unsigned long phys_base; | ||
41 | unsigned long pfn_base; | ||
42 | |||
43 | unsigned long page_kernel; | ||
44 | |||
45 | struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS+1]; | ||
46 | unsigned long sparc_unmapped_base; | ||
47 | |||
48 | struct pgtable_cache_struct pgt_quicklists; | ||
49 | |||
50 | /* References to section boundaries */ | ||
51 | extern char __init_begin, __init_end, _start, _end, etext, edata; | ||
52 | |||
53 | /* Initial ramdisk setup */ | ||
54 | extern unsigned int sparc_ramdisk_image; | ||
55 | extern unsigned int sparc_ramdisk_size; | ||
56 | |||
57 | unsigned long highstart_pfn, highend_pfn; | ||
58 | |||
59 | pte_t *kmap_pte; | ||
60 | pgprot_t kmap_prot; | ||
61 | |||
62 | #define kmap_get_fixmap_pte(vaddr) \ | ||
63 | pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)) | ||
64 | |||
65 | void __init kmap_init(void) | ||
66 | { | ||
67 | /* cache the first kmap pte */ | ||
68 | kmap_pte = kmap_get_fixmap_pte(__fix_to_virt(FIX_KMAP_BEGIN)); | ||
69 | kmap_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV | SRMMU_CACHE); | ||
70 | } | ||
71 | |||
72 | void show_mem(void) | ||
73 | { | ||
74 | printk("Mem-info:\n"); | ||
75 | show_free_areas(); | ||
76 | printk("Free swap: %6ldkB\n", | ||
77 | nr_swap_pages << (PAGE_SHIFT-10)); | ||
78 | printk("%ld pages of RAM\n", totalram_pages); | ||
79 | printk("%d free pages\n", nr_free_pages()); | ||
80 | #if 0 /* undefined pgtable_cache_size, pgd_cache_size */ | ||
81 | printk("%ld pages in page table cache\n", pgtable_cache_size); | ||
82 | #ifndef CONFIG_SMP | ||
83 | if (sparc_cpu_model == sun4m || sparc_cpu_model == sun4d) | ||
84 | printk("%ld entries in page dir cache\n", pgd_cache_size); | ||
85 | #endif | ||
86 | #endif | ||
87 | } | ||
88 | |||
89 | void __init sparc_context_init(int numctx) | ||
90 | { | ||
91 | int ctx; | ||
92 | |||
93 | ctx_list_pool = __alloc_bootmem(numctx * sizeof(struct ctx_list), SMP_CACHE_BYTES, 0UL); | ||
94 | |||
95 | for(ctx = 0; ctx < numctx; ctx++) { | ||
96 | struct ctx_list *clist; | ||
97 | |||
98 | clist = (ctx_list_pool + ctx); | ||
99 | clist->ctx_number = ctx; | ||
100 | clist->ctx_mm = NULL; | ||
101 | } | ||
102 | ctx_free.next = ctx_free.prev = &ctx_free; | ||
103 | ctx_used.next = ctx_used.prev = &ctx_used; | ||
104 | for(ctx = 0; ctx < numctx; ctx++) | ||
105 | add_to_free_ctxlist(ctx_list_pool + ctx); | ||
106 | } | ||
107 | |||
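add_to_free_ctxlist() and the ctx_free/ctx_used sentinels are declared in the sparc mm headers; ctx_list_pool itself is defined in loadmmu.c further down. A minimal editor's sketch of the circular-list insert this loop relies on, with assumed link-field names:

/* Editor's sketch only: ctx_free is a self-linked sentinel, so inserting
 * right after the head keeps the ring circular.  Field names are assumed.
 */
static void add_to_free_ctxlist_sketch(struct ctx_list *entry)
{
	entry->next = ctx_free.next;
	entry->prev = &ctx_free;
	ctx_free.next->prev = entry;
	ctx_free.next = entry;
}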
108 | extern unsigned long cmdline_memory_size; | ||
109 | unsigned long last_valid_pfn; | ||
110 | |||
111 | unsigned long calc_highpages(void) | ||
112 | { | ||
113 | int i; | ||
114 | int nr = 0; | ||
115 | |||
116 | for (i = 0; sp_banks[i].num_bytes != 0; i++) { | ||
117 | unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT; | ||
118 | unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT; | ||
119 | |||
120 | if (end_pfn <= max_low_pfn) | ||
121 | continue; | ||
122 | |||
123 | if (start_pfn < max_low_pfn) | ||
124 | start_pfn = max_low_pfn; | ||
125 | |||
126 | nr += end_pfn - start_pfn; | ||
127 | } | ||
128 | |||
129 | return nr; | ||
130 | } | ||
131 | |||
132 | unsigned long calc_max_low_pfn(void) | ||
133 | { | ||
134 | int i; | ||
135 | unsigned long tmp = pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT); | ||
136 | unsigned long curr_pfn, last_pfn; | ||
137 | |||
138 | last_pfn = (sp_banks[0].base_addr + sp_banks[0].num_bytes) >> PAGE_SHIFT; | ||
139 | for (i = 1; sp_banks[i].num_bytes != 0; i++) { | ||
140 | curr_pfn = sp_banks[i].base_addr >> PAGE_SHIFT; | ||
141 | |||
142 | if (curr_pfn >= tmp) { | ||
143 | if (last_pfn < tmp) | ||
144 | tmp = last_pfn; | ||
145 | break; | ||
146 | } | ||
147 | |||
148 | last_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT; | ||
149 | } | ||
150 | |||
151 | return tmp; | ||
152 | } | ||
153 | |||
154 | unsigned long __init bootmem_init(unsigned long *pages_avail) | ||
155 | { | ||
156 | unsigned long bootmap_size, start_pfn; | ||
157 | unsigned long end_of_phys_memory = 0UL; | ||
158 | unsigned long bootmap_pfn, bytes_avail, size; | ||
159 | int i; | ||
160 | |||
161 | bytes_avail = 0UL; | ||
162 | for (i = 0; sp_banks[i].num_bytes != 0; i++) { | ||
163 | end_of_phys_memory = sp_banks[i].base_addr + | ||
164 | sp_banks[i].num_bytes; | ||
165 | bytes_avail += sp_banks[i].num_bytes; | ||
166 | if (cmdline_memory_size) { | ||
167 | if (bytes_avail > cmdline_memory_size) { | ||
168 | unsigned long slack = bytes_avail - cmdline_memory_size; | ||
169 | |||
170 | bytes_avail -= slack; | ||
171 | end_of_phys_memory -= slack; | ||
172 | |||
173 | sp_banks[i].num_bytes -= slack; | ||
174 | if (sp_banks[i].num_bytes == 0) { | ||
175 | sp_banks[i].base_addr = 0xdeadbeef; | ||
176 | } else { | ||
177 | sp_banks[i+1].num_bytes = 0; | ||
178 | sp_banks[i+1].base_addr = 0xdeadbeef; | ||
179 | } | ||
180 | break; | ||
181 | } | ||
182 | } | ||
183 | } | ||
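	/* Editor's example: with two 64MB banks and mem=96M on the command
	 * line, bank 1 raises bytes_avail to 128MB, so slack = 32MB is
	 * shaved off that bank and the following, now unused, bank is
	 * terminated with the 0xdeadbeef sentinel.
	 */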
184 | |||
185 | /* Start with page aligned address of last symbol in kernel | ||
186 | * image. | ||
187 | */ | ||
188 | start_pfn = (unsigned long)__pa(PAGE_ALIGN((unsigned long) &_end)); | ||
189 | |||
190 | /* Now shift down to get the real physical page frame number. */ | ||
191 | start_pfn >>= PAGE_SHIFT; | ||
192 | |||
193 | bootmap_pfn = start_pfn; | ||
194 | |||
195 | max_pfn = end_of_phys_memory >> PAGE_SHIFT; | ||
196 | |||
197 | max_low_pfn = max_pfn; | ||
198 | highstart_pfn = highend_pfn = max_pfn; | ||
199 | |||
200 | if (max_low_pfn > pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT)) { | ||
201 | highstart_pfn = pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT); | ||
202 | max_low_pfn = calc_max_low_pfn(); | ||
203 | printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", | ||
204 | calc_highpages() >> (20 - PAGE_SHIFT)); | ||
205 | } | ||
206 | |||
207 | #ifdef CONFIG_BLK_DEV_INITRD | ||
208 | /* Now we have to check the initial ramdisk, so that the bootmap does not overwrite it */ | ||
209 | if (sparc_ramdisk_image) { | ||
210 | if (sparc_ramdisk_image >= (unsigned long)&_end - 2 * PAGE_SIZE) | ||
211 | sparc_ramdisk_image -= KERNBASE; | ||
212 | initrd_start = sparc_ramdisk_image + phys_base; | ||
213 | initrd_end = initrd_start + sparc_ramdisk_size; | ||
214 | if (initrd_end > end_of_phys_memory) { | ||
215 | printk(KERN_CRIT "initrd extends beyond end of memory " | ||
216 | "(0x%016lx > 0x%016lx)\ndisabling initrd\n", | ||
217 | initrd_end, end_of_phys_memory); | ||
218 | initrd_start = 0; | ||
219 | } | ||
220 | if (initrd_start) { | ||
221 | if (initrd_start >= (start_pfn << PAGE_SHIFT) && | ||
222 | initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE) | ||
223 | bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT; | ||
224 | } | ||
225 | } | ||
226 | #endif | ||
227 | /* Initialize the boot-time allocator. */ | ||
228 | bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, | ||
229 | max_low_pfn); | ||
230 | |||
231 | /* Now register the available physical memory with the | ||
232 | * allocator. | ||
233 | */ | ||
234 | *pages_avail = 0; | ||
235 | for (i = 0; sp_banks[i].num_bytes != 0; i++) { | ||
236 | unsigned long curr_pfn, last_pfn; | ||
237 | |||
238 | curr_pfn = sp_banks[i].base_addr >> PAGE_SHIFT; | ||
239 | if (curr_pfn >= max_low_pfn) | ||
240 | break; | ||
241 | |||
242 | last_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT; | ||
243 | if (last_pfn > max_low_pfn) | ||
244 | last_pfn = max_low_pfn; | ||
245 | |||
246 | /* | ||
247 | * .. finally, did all the rounding and playing | ||
248 | * around just make the area go away? | ||
249 | */ | ||
250 | if (last_pfn <= curr_pfn) | ||
251 | continue; | ||
252 | |||
253 | size = (last_pfn - curr_pfn) << PAGE_SHIFT; | ||
254 | *pages_avail += last_pfn - curr_pfn; | ||
255 | |||
256 | free_bootmem(sp_banks[i].base_addr, size); | ||
257 | } | ||
258 | |||
259 | #ifdef CONFIG_BLK_DEV_INITRD | ||
260 | if (initrd_start) { | ||
261 | /* Reserve the initrd image area. */ | ||
262 | size = initrd_end - initrd_start; | ||
263 | reserve_bootmem(initrd_start, size); | ||
264 | *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT; | ||
265 | |||
266 | initrd_start = (initrd_start - phys_base) + PAGE_OFFSET; | ||
267 | initrd_end = (initrd_end - phys_base) + PAGE_OFFSET; | ||
268 | } | ||
269 | #endif | ||
270 | /* Reserve the kernel text/data/bss. */ | ||
271 | size = (start_pfn << PAGE_SHIFT) - phys_base; | ||
272 | reserve_bootmem(phys_base, size); | ||
273 | *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT; | ||
274 | |||
275 | /* Reserve the bootmem map. We do not account for it | ||
276 | * in pages_avail because we will release that memory | ||
277 | * in free_all_bootmem. | ||
278 | */ | ||
279 | size = bootmap_size; | ||
280 | reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size); | ||
281 | *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT; | ||
282 | |||
283 | return max_pfn; | ||
284 | } | ||
285 | |||
286 | /* | ||
287 | * check_pgt_cache | ||
288 | * | ||
289 | * This is called at the end of unmapping of VMA (zap_page_range), | ||
290 | * to rescan the page table cache for architecture-specific things, | ||
291 | * presumably something like sun4/sun4c PMEGs. Most architectures | ||
292 | * define check_pgt_cache empty. | ||
293 | * | ||
294 | * We simply copy the 2.4 implementation for now. | ||
295 | */ | ||
296 | int pgt_cache_water[2] = { 25, 50 }; | ||
297 | |||
298 | void check_pgt_cache(void) | ||
299 | { | ||
300 | do_check_pgt_cache(pgt_cache_water[0], pgt_cache_water[1]); | ||
301 | } | ||
302 | |||
303 | /* | ||
304 | * paging_init() sets up the page tables: We call the MMU specific | ||
305 | * init routine based upon the Sun model type on the Sparc. | ||
306 | * | ||
307 | */ | ||
308 | extern void sun4c_paging_init(void); | ||
309 | extern void srmmu_paging_init(void); | ||
310 | extern void device_scan(void); | ||
311 | |||
312 | void __init paging_init(void) | ||
313 | { | ||
314 | switch(sparc_cpu_model) { | ||
315 | case sun4c: | ||
316 | case sun4e: | ||
317 | case sun4: | ||
318 | sun4c_paging_init(); | ||
319 | sparc_unmapped_base = 0xe0000000; | ||
320 | BTFIXUPSET_SETHI(sparc_unmapped_base, 0xe0000000); | ||
321 | break; | ||
322 | case sun4m: | ||
323 | case sun4d: | ||
324 | srmmu_paging_init(); | ||
325 | sparc_unmapped_base = 0x50000000; | ||
326 | BTFIXUPSET_SETHI(sparc_unmapped_base, 0x50000000); | ||
327 | break; | ||
328 | default: | ||
329 | prom_printf("paging_init: Cannot init paging on this Sparc\n"); | ||
330 | prom_printf("paging_init: sparc_cpu_model = %d\n", sparc_cpu_model); | ||
331 | prom_printf("paging_init: Halting...\n"); | ||
332 | prom_halt(); | ||
333 | } | ||
334 | |||
335 | /* Initialize the protection map with non-constant, MMU dependent values. */ | ||
336 | protection_map[0] = PAGE_NONE; | ||
337 | protection_map[1] = PAGE_READONLY; | ||
338 | protection_map[2] = PAGE_COPY; | ||
339 | protection_map[3] = PAGE_COPY; | ||
340 | protection_map[4] = PAGE_READONLY; | ||
341 | protection_map[5] = PAGE_READONLY; | ||
342 | protection_map[6] = PAGE_COPY; | ||
343 | protection_map[7] = PAGE_COPY; | ||
344 | protection_map[8] = PAGE_NONE; | ||
345 | protection_map[9] = PAGE_READONLY; | ||
346 | protection_map[10] = PAGE_SHARED; | ||
347 | protection_map[11] = PAGE_SHARED; | ||
348 | protection_map[12] = PAGE_READONLY; | ||
349 | protection_map[13] = PAGE_READONLY; | ||
350 | protection_map[14] = PAGE_SHARED; | ||
351 | protection_map[15] = PAGE_SHARED; | ||
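	/* Editor's note: the index is the vm_flags nibble: bit 0 read,
	 * bit 1 write, bit 2 exec, bit 3 shared.  Private writable entries
	 * (2,3,6,7) get PAGE_COPY for copy-on-write; shared writable ones
	 * (10,11,14,15) get PAGE_SHARED.
	 */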
352 | btfixup(); | ||
353 | device_scan(); | ||
354 | } | ||
355 | |||
356 | struct cache_palias *sparc_aliases; | ||
357 | |||
358 | static void __init taint_real_pages(void) | ||
359 | { | ||
360 | int i; | ||
361 | |||
362 | for (i = 0; sp_banks[i].num_bytes; i++) { | ||
363 | unsigned long start, end; | ||
364 | |||
365 | start = sp_banks[i].base_addr; | ||
366 | end = start + sp_banks[i].num_bytes; | ||
367 | |||
368 | while (start < end) { | ||
369 | set_bit(start >> 20, sparc_valid_addr_bitmap); | ||
370 | start += PAGE_SIZE; | ||
371 | } | ||
372 | } | ||
373 | } | ||
374 | |||
375 | void map_high_region(unsigned long start_pfn, unsigned long end_pfn) | ||
376 | { | ||
377 | unsigned long tmp; | ||
378 | |||
379 | #ifdef CONFIG_DEBUG_HIGHMEM | ||
380 | printk("mapping high region %08lx - %08lx\n", start_pfn, end_pfn); | ||
381 | #endif | ||
382 | |||
383 | for (tmp = start_pfn; tmp < end_pfn; tmp++) { | ||
384 | struct page *page = pfn_to_page(tmp); | ||
385 | |||
386 | ClearPageReserved(page); | ||
387 | set_bit(PG_highmem, &page->flags); | ||
388 | set_page_count(page, 1); | ||
389 | __free_page(page); | ||
390 | totalhigh_pages++; | ||
391 | } | ||
392 | } | ||
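/* Editor's note: ClearPageReserved / set_page_count(page, 1) /
 * __free_page() is the 2.6-era idiom for handing a bootmem-reserved page
 * to the buddy allocator: clear the reserved flag, give the page one
 * reference, then drop it.  free_initmem() and free_initrd_mem() below
 * use the same sequence.
 */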
393 | |||
394 | void __init mem_init(void) | ||
395 | { | ||
396 | int codepages = 0; | ||
397 | int datapages = 0; | ||
398 | int initpages = 0; | ||
399 | int reservedpages = 0; | ||
400 | int i; | ||
401 | |||
402 | if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) { | ||
403 | prom_printf("BUG: fixmap and pkmap areas overlap\n"); | ||
404 | prom_printf("pkbase: 0x%lx pkend: 0x%lx fixstart 0x%lx\n", | ||
405 | PKMAP_BASE, | ||
406 | (unsigned long)PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, | ||
407 | FIXADDR_START); | ||
408 | prom_printf("Please mail sparclinux@vger.kernel.org.\n"); | ||
409 | prom_halt(); | ||
410 | } | ||
411 | |||
412 | |||
413 | /* Saves us work later. */ | ||
414 | memset((void *)&empty_zero_page, 0, PAGE_SIZE); | ||
415 | |||
416 | i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5); | ||
417 | i += 1; | ||
418 | sparc_valid_addr_bitmap = (unsigned long *) | ||
419 | __alloc_bootmem(i << 2, SMP_CACHE_BYTES, 0UL); | ||
420 | |||
421 | if (sparc_valid_addr_bitmap == NULL) { | ||
422 | prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n"); | ||
423 | prom_halt(); | ||
424 | } | ||
425 | memset(sparc_valid_addr_bitmap, 0, i << 2); | ||
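	/* Editor's note: the bitmap holds one bit per megabyte of physical
	 * address space (taint_real_pages() sets bit addr >> 20), so i is
	 * last_valid_pfn >> (20 - PAGE_SHIFT) megabytes divided by 32 bits
	 * per word (the extra >> 5), and i << 2 converts words to bytes for
	 * the allocation and memset above.
	 */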
426 | |||
427 | taint_real_pages(); | ||
428 | |||
429 | max_mapnr = last_valid_pfn - pfn_base; | ||
430 | high_memory = __va(max_low_pfn << PAGE_SHIFT); | ||
431 | |||
432 | totalram_pages = free_all_bootmem(); | ||
433 | |||
434 | for (i = 0; sp_banks[i].num_bytes != 0; i++) { | ||
435 | unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT; | ||
436 | unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT; | ||
437 | |||
438 | num_physpages += sp_banks[i].num_bytes >> PAGE_SHIFT; | ||
439 | |||
440 | if (end_pfn <= highstart_pfn) | ||
441 | continue; | ||
442 | |||
443 | if (start_pfn < highstart_pfn) | ||
444 | start_pfn = highstart_pfn; | ||
445 | |||
446 | map_high_region(start_pfn, end_pfn); | ||
447 | } | ||
448 | |||
449 | totalram_pages += totalhigh_pages; | ||
450 | |||
451 | codepages = (((unsigned long) &etext) - ((unsigned long)&_start)); | ||
452 | codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT; | ||
453 | datapages = (((unsigned long) &edata) - ((unsigned long)&etext)); | ||
454 | datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT; | ||
455 | initpages = (((unsigned long) &__init_end) - ((unsigned long) &__init_begin)); | ||
456 | initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT; | ||
457 | |||
458 | /* Ignore memory holes for the purpose of counting reserved pages */ | ||
459 | for (i=0; i < max_low_pfn; i++) | ||
460 | if (test_bit(i >> (20 - PAGE_SHIFT), sparc_valid_addr_bitmap) | ||
461 | && PageReserved(pfn_to_page(i))) | ||
462 | reservedpages++; | ||
463 | |||
464 | printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n", | ||
465 | (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), | ||
466 | num_physpages << (PAGE_SHIFT - 10), | ||
467 | codepages << (PAGE_SHIFT-10), | ||
468 | reservedpages << (PAGE_SHIFT - 10), | ||
469 | datapages << (PAGE_SHIFT-10), | ||
470 | initpages << (PAGE_SHIFT-10), | ||
471 | totalhigh_pages << (PAGE_SHIFT-10)); | ||
472 | } | ||
473 | |||
474 | void free_initmem (void) | ||
475 | { | ||
476 | unsigned long addr; | ||
477 | |||
478 | addr = (unsigned long)(&__init_begin); | ||
479 | for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { | ||
480 | struct page *p; | ||
481 | |||
482 | p = virt_to_page(addr); | ||
483 | |||
484 | ClearPageReserved(p); | ||
485 | set_page_count(p, 1); | ||
486 | __free_page(p); | ||
487 | totalram_pages++; | ||
488 | num_physpages++; | ||
489 | } | ||
490 | printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (&__init_end - &__init_begin) >> 10); | ||
491 | } | ||
492 | |||
493 | #ifdef CONFIG_BLK_DEV_INITRD | ||
494 | void free_initrd_mem(unsigned long start, unsigned long end) | ||
495 | { | ||
496 | if (start < end) | ||
497 | printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10); | ||
498 | for (; start < end; start += PAGE_SIZE) { | ||
499 | struct page *p = virt_to_page(start); | ||
500 | |||
501 | ClearPageReserved(p); | ||
502 | set_page_count(p, 1); | ||
503 | __free_page(p); | ||
504 | num_physpages++; | ||
505 | } | ||
506 | } | ||
507 | #endif | ||
508 | |||
509 | void sparc_flush_page_to_ram(struct page *page) | ||
510 | { | ||
511 | unsigned long vaddr = (unsigned long)page_address(page); | ||
512 | |||
513 | if (vaddr) | ||
514 | __flush_page_to_ram(vaddr); | ||
515 | } | ||
diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c new file mode 100644 index 000000000000..eefffa1dc5de --- /dev/null +++ b/arch/sparc/mm/io-unit.c | |||
@@ -0,0 +1,318 @@ | |||
1 | /* $Id: io-unit.c,v 1.24 2001/12/17 07:05:09 davem Exp $ | ||
2 | * io-unit.c: IO-UNIT specific routines for memory management. | ||
3 | * | ||
4 | * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | ||
5 | */ | ||
6 | |||
7 | #include <linux/config.h> | ||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/init.h> | ||
10 | #include <linux/slab.h> | ||
11 | #include <linux/spinlock.h> | ||
12 | #include <linux/mm.h> | ||
13 | #include <linux/highmem.h> /* pte_offset_map => kmap_atomic */ | ||
14 | #include <linux/bitops.h> | ||
15 | |||
16 | #include <asm/scatterlist.h> | ||
17 | #include <asm/pgalloc.h> | ||
18 | #include <asm/pgtable.h> | ||
19 | #include <asm/sbus.h> | ||
20 | #include <asm/io.h> | ||
21 | #include <asm/io-unit.h> | ||
22 | #include <asm/mxcc.h> | ||
23 | #include <asm/cacheflush.h> | ||
24 | #include <asm/tlbflush.h> | ||
25 | #include <asm/dma.h> | ||
26 | |||
27 | /* #define IOUNIT_DEBUG */ | ||
28 | #ifdef IOUNIT_DEBUG | ||
29 | #define IOD(x) printk(x) | ||
30 | #else | ||
31 | #define IOD(x) do { } while (0) | ||
32 | #endif | ||
33 | |||
34 | #define IOPERM (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID) | ||
35 | #define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM) | ||
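/* Editor's note: the >> 4 packs the (up to 36-bit) physical address into
 * a 32-bit entry: for a page-aligned address it leaves the page number in
 * the IOUPTE_PAGE field with the low bits free for the IOPERM flags.
 * iounit_translate_dvma() below undoes it with >> (PAGE_SHIFT - 4).
 */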
36 | |||
37 | void __init | ||
38 | iounit_init(int sbi_node, int io_node, struct sbus_bus *sbus) | ||
39 | { | ||
40 | iopte_t *xpt, *xptend; | ||
41 | struct iounit_struct *iounit; | ||
42 | struct linux_prom_registers iommu_promregs[PROMREG_MAX]; | ||
43 | struct resource r; | ||
44 | |||
45 | iounit = kmalloc(sizeof(struct iounit_struct), GFP_ATOMIC); | ||
46 | |||
47 | memset(iounit, 0, sizeof(*iounit)); | ||
48 | iounit->limit[0] = IOUNIT_BMAP1_START; | ||
49 | iounit->limit[1] = IOUNIT_BMAP2_START; | ||
50 | iounit->limit[2] = IOUNIT_BMAPM_START; | ||
51 | iounit->limit[3] = IOUNIT_BMAPM_END; | ||
52 | iounit->rotor[1] = IOUNIT_BMAP2_START; | ||
53 | iounit->rotor[2] = IOUNIT_BMAPM_START; | ||
54 | |||
55 | xpt = NULL; | ||
56 | if(prom_getproperty(sbi_node, "reg", (void *) iommu_promregs, | ||
57 | sizeof(iommu_promregs)) != -1) { | ||
58 | prom_apply_generic_ranges(io_node, 0, iommu_promregs, 3); | ||
59 | memset(&r, 0, sizeof(r)); | ||
60 | r.flags = iommu_promregs[2].which_io; | ||
61 | r.start = iommu_promregs[2].phys_addr; | ||
62 | xpt = (iopte_t *) sbus_ioremap(&r, 0, PAGE_SIZE * 16, "XPT"); | ||
63 | } | ||
64 | if(!xpt) panic("Cannot map External Page Table."); | ||
65 | |||
66 | sbus->iommu = (struct iommu_struct *)iounit; | ||
67 | iounit->page_table = xpt; | ||
68 | |||
69 | for (xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t); | ||
70 | xpt < xptend;) | ||
71 | iopte_val(*xpt++) = 0; | ||
72 | } | ||
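/* Editor's note: the clearing loop above walks 16 pages of external page
 * table, i.e. (16 * PAGE_SIZE) / sizeof(iopte_t) = 16K four-byte IOPTEs
 * with 4KB pages, matching the 16-page sbus_ioremap() window.
 */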
73 | |||
74 | /* One has to hold iounit->lock to call this */ | ||
75 | static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size) | ||
76 | { | ||
77 | int i, j, k, npages; | ||
78 | unsigned long rotor, scan, limit; | ||
79 | iopte_t iopte; | ||
80 | |||
81 | npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT; | ||
82 | |||
83 | /* A tiny bit of magic ingredient :) */ | ||
84 | switch (npages) { | ||
85 | case 1: i = 0x0231; break; | ||
86 | case 2: i = 0x0132; break; | ||
87 | default: i = 0x0213; break; | ||
88 | } | ||
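	/* Editor's note: each hex nibble of i names one of the three bitmap
	 * regions (1, 2 or M); the low nibble is tried first, and the
	 * "i >>= 4" below moves on to the next candidate, so 0x0213 means
	 * try region 3, then 1, then 2, panicking once the nibbles run out.
	 */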
89 | |||
90 | IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages)); | ||
91 | |||
92 | next: j = (i & 15); | ||
93 | rotor = iounit->rotor[j - 1]; | ||
94 | limit = iounit->limit[j]; | ||
95 | scan = rotor; | ||
96 | nexti: scan = find_next_zero_bit(iounit->bmap, limit, scan); | ||
97 | if (scan + npages > limit) { | ||
98 | if (limit != rotor) { | ||
99 | limit = rotor; | ||
100 | scan = iounit->limit[j - 1]; | ||
101 | goto nexti; | ||
102 | } | ||
103 | i >>= 4; | ||
104 | if (!(i & 15)) | ||
105 | panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size); | ||
106 | goto next; | ||
107 | } | ||
108 | for (k = 1, scan++; k < npages; k++) | ||
109 | if (test_bit(scan++, iounit->bmap)) | ||
110 | goto nexti; | ||
111 | iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1]; | ||
112 | scan -= npages; | ||
113 | iopte = MKIOPTE(__pa(vaddr & PAGE_MASK)); | ||
114 | vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK); | ||
115 | for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) { | ||
116 | set_bit(scan, iounit->bmap); | ||
117 | iounit->page_table[scan] = iopte; | ||
118 | } | ||
119 | IOD(("%08lx\n", vaddr)); | ||
120 | return vaddr; | ||
121 | } | ||
122 | |||
123 | static __u32 iounit_get_scsi_one(char *vaddr, unsigned long len, struct sbus_bus *sbus) | ||
124 | { | ||
125 | unsigned long ret, flags; | ||
126 | struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu; | ||
127 | |||
128 | spin_lock_irqsave(&iounit->lock, flags); | ||
129 | ret = iounit_get_area(iounit, (unsigned long)vaddr, len); | ||
130 | spin_unlock_irqrestore(&iounit->lock, flags); | ||
131 | return ret; | ||
132 | } | ||
133 | |||
134 | static void iounit_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus) | ||
135 | { | ||
136 | unsigned long flags; | ||
137 | struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu; | ||
138 | |||
139 | /* FIXME: Cache some resolved pages - often several sg entries are to the same page */ | ||
140 | spin_lock_irqsave(&iounit->lock, flags); | ||
141 | while (sz != 0) { | ||
142 | --sz; | ||
143 | sg[sz].dvma_address = iounit_get_area(iounit, (unsigned long)page_address(sg[sz].page) + sg[sz].offset, sg[sz].length); | ||
144 | sg[sz].dvma_length = sg[sz].length; | ||
145 | } | ||
146 | spin_unlock_irqrestore(&iounit->lock, flags); | ||
147 | } | ||
148 | |||
149 | static void iounit_release_scsi_one(__u32 vaddr, unsigned long len, struct sbus_bus *sbus) | ||
150 | { | ||
151 | unsigned long flags; | ||
152 | struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu; | ||
153 | |||
154 | spin_lock_irqsave(&iounit->lock, flags); | ||
155 | len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT; | ||
156 | vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT; | ||
157 | IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr)); | ||
158 | for (len += vaddr; vaddr < len; vaddr++) | ||
159 | clear_bit(vaddr, iounit->bmap); | ||
160 | spin_unlock_irqrestore(&iounit->lock, flags); | ||
161 | } | ||
162 | |||
163 | static void iounit_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus) | ||
164 | { | ||
165 | unsigned long flags; | ||
166 | unsigned long vaddr, len; | ||
167 | struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu; | ||
168 | |||
169 | spin_lock_irqsave(&iounit->lock, flags); | ||
170 | while (sz != 0) { | ||
171 | --sz; | ||
172 | len = ((sg[sz].dvma_address & ~PAGE_MASK) + sg[sz].length + (PAGE_SIZE-1)) >> PAGE_SHIFT; | ||
173 | vaddr = (sg[sz].dvma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT; | ||
174 | IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr)); | ||
175 | for (len += vaddr; vaddr < len; vaddr++) | ||
176 | clear_bit(vaddr, iounit->bmap); | ||
177 | } | ||
178 | spin_unlock_irqrestore(&iounit->lock, flags); | ||
179 | } | ||
180 | |||
181 | #ifdef CONFIG_SBUS | ||
182 | static int iounit_map_dma_area(dma_addr_t *pba, unsigned long va, __u32 addr, int len) | ||
183 | { | ||
184 | unsigned long page, end; | ||
185 | pgprot_t dvma_prot; | ||
186 | iopte_t *iopte; | ||
187 | struct sbus_bus *sbus; | ||
188 | |||
189 | *pba = addr; | ||
190 | |||
191 | dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV); | ||
192 | end = PAGE_ALIGN((addr + len)); | ||
193 | while(addr < end) { | ||
194 | page = va; | ||
195 | { | ||
196 | pgd_t *pgdp; | ||
197 | pmd_t *pmdp; | ||
198 | pte_t *ptep; | ||
199 | long i; | ||
200 | |||
201 | pgdp = pgd_offset(&init_mm, addr); | ||
202 | pmdp = pmd_offset(pgdp, addr); | ||
203 | ptep = pte_offset_map(pmdp, addr); | ||
204 | |||
205 | set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot)); | ||
206 | |||
207 | i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT); | ||
208 | |||
209 | for_each_sbus(sbus) { | ||
210 | struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu; | ||
211 | |||
212 | iopte = (iopte_t *)(iounit->page_table + i); | ||
213 | *iopte = MKIOPTE(__pa(page)); | ||
214 | } | ||
215 | } | ||
216 | addr += PAGE_SIZE; | ||
217 | va += PAGE_SIZE; | ||
218 | } | ||
219 | flush_cache_all(); | ||
220 | flush_tlb_all(); | ||
221 | |||
222 | return 0; | ||
223 | } | ||
224 | |||
225 | static void iounit_unmap_dma_area(unsigned long addr, int len) | ||
226 | { | ||
227 | /* XXX Somebody please fill this in */ | ||
228 | } | ||
229 | |||
230 | /* XXX We do not pass sbus device here, bad. */ | ||
231 | static struct page *iounit_translate_dvma(unsigned long addr) | ||
232 | { | ||
233 | struct sbus_bus *sbus = sbus_root; /* They are all the same */ | ||
234 | struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu; | ||
235 | int i; | ||
236 | iopte_t *iopte; | ||
237 | |||
238 | i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT); | ||
239 | iopte = (iopte_t *)(iounit->page_table + i); | ||
240 | return pfn_to_page(iopte_val(*iopte) >> (PAGE_SHIFT-4)); /* XXX sun4d guru, help */ | ||
241 | } | ||
242 | #endif | ||
243 | |||
244 | static char *iounit_lockarea(char *vaddr, unsigned long len) | ||
245 | { | ||
246 | /* FIXME: Write this */ | ||
247 | return vaddr; | ||
248 | } | ||
249 | |||
250 | static void iounit_unlockarea(char *vaddr, unsigned long len) | ||
251 | { | ||
252 | /* FIXME: Write this */ | ||
253 | } | ||
254 | |||
255 | void __init ld_mmu_iounit(void) | ||
256 | { | ||
257 | BTFIXUPSET_CALL(mmu_lockarea, iounit_lockarea, BTFIXUPCALL_RETO0); | ||
258 | BTFIXUPSET_CALL(mmu_unlockarea, iounit_unlockarea, BTFIXUPCALL_NOP); | ||
259 | |||
260 | BTFIXUPSET_CALL(mmu_get_scsi_one, iounit_get_scsi_one, BTFIXUPCALL_NORM); | ||
261 | BTFIXUPSET_CALL(mmu_get_scsi_sgl, iounit_get_scsi_sgl, BTFIXUPCALL_NORM); | ||
262 | BTFIXUPSET_CALL(mmu_release_scsi_one, iounit_release_scsi_one, BTFIXUPCALL_NORM); | ||
263 | BTFIXUPSET_CALL(mmu_release_scsi_sgl, iounit_release_scsi_sgl, BTFIXUPCALL_NORM); | ||
264 | |||
265 | #ifdef CONFIG_SBUS | ||
266 | BTFIXUPSET_CALL(mmu_map_dma_area, iounit_map_dma_area, BTFIXUPCALL_NORM); | ||
267 | BTFIXUPSET_CALL(mmu_unmap_dma_area, iounit_unmap_dma_area, BTFIXUPCALL_NORM); | ||
268 | BTFIXUPSET_CALL(mmu_translate_dvma, iounit_translate_dvma, BTFIXUPCALL_NORM); | ||
269 | #endif | ||
270 | } | ||
271 | |||
272 | __u32 iounit_map_dma_init(struct sbus_bus *sbus, int size) | ||
273 | { | ||
274 | int i, j, k, npages; | ||
275 | unsigned long rotor, scan, limit; | ||
276 | unsigned long flags; | ||
277 | __u32 ret; | ||
278 | struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu; | ||
279 | |||
280 | npages = (size + (PAGE_SIZE-1)) >> PAGE_SHIFT; | ||
281 | i = 0x0213; | ||
282 | spin_lock_irqsave(&iounit->lock, flags); | ||
283 | next: j = (i & 15); | ||
284 | rotor = iounit->rotor[j - 1]; | ||
285 | limit = iounit->limit[j]; | ||
286 | scan = rotor; | ||
287 | nexti: scan = find_next_zero_bit(iounit->bmap, limit, scan); | ||
288 | if (scan + npages > limit) { | ||
289 | if (limit != rotor) { | ||
290 | limit = rotor; | ||
291 | scan = iounit->limit[j - 1]; | ||
292 | goto nexti; | ||
293 | } | ||
294 | i >>= 4; | ||
295 | if (!(i & 15)) | ||
296 | panic("iounit_map_dma_init: Couldn't find free iopte slots for %d bytes\n", size); | ||
297 | goto next; | ||
298 | } | ||
299 | for (k = 1, scan++; k < npages; k++) | ||
300 | if (test_bit(scan++, iounit->bmap)) | ||
301 | goto nexti; | ||
302 | iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1]; | ||
303 | scan -= npages; | ||
304 | ret = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT); | ||
305 | for (k = 0; k < npages; k++, scan++) | ||
306 | set_bit(scan, iounit->bmap); | ||
307 | spin_unlock_irqrestore(&iounit->lock, flags); | ||
308 | return ret; | ||
309 | } | ||
310 | |||
311 | __u32 iounit_map_dma_page(__u32 vaddr, void *addr, struct sbus_bus *sbus) | ||
312 | { | ||
313 | int scan = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT; | ||
314 | struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu; | ||
315 | |||
316 | iounit->page_table[scan] = MKIOPTE(__pa(((unsigned long)addr) & PAGE_MASK)); | ||
317 | return vaddr + (((unsigned long)addr) & ~PAGE_MASK); | ||
318 | } | ||
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c new file mode 100644 index 000000000000..489bf68d5f05 --- /dev/null +++ b/arch/sparc/mm/iommu.c | |||
@@ -0,0 +1,475 @@ | |||
1 | /* | ||
2 | * iommu.c: IOMMU specific routines for memory management. | ||
3 | * | ||
4 | * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) | ||
5 | * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com) | ||
6 | * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) | ||
7 | * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | ||
8 | */ | ||
9 | |||
10 | #include <linux/config.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/mm.h> | ||
14 | #include <linux/slab.h> | ||
15 | #include <linux/highmem.h> /* pte_offset_map => kmap_atomic */ | ||
16 | |||
17 | #include <asm/scatterlist.h> | ||
18 | #include <asm/pgalloc.h> | ||
19 | #include <asm/pgtable.h> | ||
20 | #include <asm/sbus.h> | ||
21 | #include <asm/io.h> | ||
22 | #include <asm/mxcc.h> | ||
23 | #include <asm/mbus.h> | ||
24 | #include <asm/cacheflush.h> | ||
25 | #include <asm/tlbflush.h> | ||
26 | #include <asm/bitext.h> | ||
27 | #include <asm/iommu.h> | ||
28 | #include <asm/dma.h> | ||
29 | |||
30 | /* | ||
31 | * This can be sized dynamically, but we will do this | ||
32 | * only when we have a guidance about actual I/O pressures. | ||
33 | */ | ||
34 | #define IOMMU_RNGE IOMMU_RNGE_256MB | ||
35 | #define IOMMU_START 0xF0000000 | ||
36 | #define IOMMU_WINSIZE (256*1024*1024U) | ||
37 | #define IOMMU_NPTES (IOMMU_WINSIZE/PAGE_SIZE) /* 64K PTEs, 256KB */ | ||
38 | #define IOMMU_ORDER 6 /* 4096 * (1<<6) */ | ||
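/* Editor's note: 256MB / 4KB pages = 64K PTEs; at 4 bytes each the table
 * is 256KB, i.e. 64 pages, hence the order-6 (4096 << 6 bytes) allocation
 * in iommu_init() below.
 */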
39 | |||
40 | /* srmmu.c */ | ||
41 | extern int viking_mxcc_present; | ||
42 | BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long) | ||
43 | #define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page) | ||
44 | extern int flush_page_for_dma_global; | ||
45 | static int viking_flush; | ||
46 | /* viking.S */ | ||
47 | extern void viking_flush_page(unsigned long page); | ||
48 | extern void viking_mxcc_flush_page(unsigned long page); | ||
49 | |||
50 | /* | ||
51 | * Values precomputed according to CPU type. | ||
52 | */ | ||
53 | static unsigned int ioperm_noc; /* Consistent mapping iopte flags */ | ||
54 | static pgprot_t dvma_prot; /* Consistent mapping pte flags */ | ||
55 | |||
56 | #define IOPERM (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID) | ||
57 | #define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ) | ||
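/* Editor's note: pfn << 8 places the page frame number in the IOPTE_PAGE
 * field above the eight low permission/control bits, mirrored by the
 * >> (PAGE_SHIFT - 4) in iommu_translate_dvma() below; masking with
 * ~IOPTE_WAZ keeps the write-as-zero bits clear.
 */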
58 | |||
59 | void __init | ||
60 | iommu_init(int iommund, struct sbus_bus *sbus) | ||
61 | { | ||
62 | unsigned int impl, vers; | ||
63 | unsigned long tmp; | ||
64 | struct iommu_struct *iommu; | ||
65 | struct linux_prom_registers iommu_promregs[PROMREG_MAX]; | ||
66 | struct resource r; | ||
67 | unsigned long *bitmap; | ||
68 | |||
69 | iommu = kmalloc(sizeof(struct iommu_struct), GFP_ATOMIC); | ||
70 | if (!iommu) { | ||
71 | prom_printf("Unable to allocate iommu structure\n"); | ||
72 | prom_halt(); | ||
73 | } | ||
74 | iommu->regs = NULL; | ||
75 | if (prom_getproperty(iommund, "reg", (void *) iommu_promregs, | ||
76 | sizeof(iommu_promregs)) != -1) { | ||
77 | memset(&r, 0, sizeof(r)); | ||
78 | r.flags = iommu_promregs[0].which_io; | ||
79 | r.start = iommu_promregs[0].phys_addr; | ||
80 | iommu->regs = (struct iommu_regs *) | ||
81 | sbus_ioremap(&r, 0, PAGE_SIZE * 3, "iommu_regs"); | ||
82 | } | ||
83 | if (!iommu->regs) { | ||
84 | prom_printf("Cannot map IOMMU registers\n"); | ||
85 | prom_halt(); | ||
86 | } | ||
87 | impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28; | ||
88 | vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24; | ||
89 | tmp = iommu->regs->control; | ||
90 | tmp &= ~(IOMMU_CTRL_RNGE); | ||
91 | tmp |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB); | ||
92 | iommu->regs->control = tmp; | ||
93 | iommu_invalidate(iommu->regs); | ||
94 | iommu->start = IOMMU_START; | ||
95 | iommu->end = 0xffffffff; | ||
96 | |||
97 | /* Allocate IOMMU page table */ | ||
98 | /* Stupid alignment constraints give me a headache. | ||
99 | We need a 256K, 512K, 1M or 2M area aligned to | ||
100 | its size, and the current gfp allocator will | ||
101 | fortunately give it to us. */ | ||
102 | tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER); | ||
103 | if (!tmp) { | ||
104 | prom_printf("Unable to allocate iommu table [0x%08x]\n", | ||
105 | IOMMU_NPTES*sizeof(iopte_t)); | ||
106 | prom_halt(); | ||
107 | } | ||
108 | iommu->page_table = (iopte_t *)tmp; | ||
109 | |||
110 | /* Initialize new table. */ | ||
111 | memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t)); | ||
112 | flush_cache_all(); | ||
113 | flush_tlb_all(); | ||
114 | iommu->regs->base = __pa((unsigned long) iommu->page_table) >> 4; | ||
115 | iommu_invalidate(iommu->regs); | ||
116 | |||
117 | bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL); | ||
118 | if (!bitmap) { | ||
119 | prom_printf("Unable to allocate iommu bitmap [%d]\n", | ||
120 | (int)(IOMMU_NPTES>>3)); | ||
121 | prom_halt(); | ||
122 | } | ||
123 | bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES); | ||
124 | /* To be coherent on HyperSparc, the page color of DVMA | ||
125 | * and physical addresses must match. | ||
126 | */ | ||
127 | if (srmmu_modtype == HyperSparc) | ||
128 | iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT; | ||
129 | else | ||
130 | iommu->usemap.num_colors = 1; | ||
131 | |||
132 | printk("IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n", | ||
133 | impl, vers, iommu->page_table, | ||
134 | (int)(IOMMU_NPTES*sizeof(iopte_t)), (int)IOMMU_NPTES); | ||
135 | |||
136 | sbus->iommu = iommu; | ||
137 | } | ||
138 | |||
139 | /* This begs to be btfixup-ed by srmmu. */ | ||
140 | /* Flush the iotlb entries to ram. */ | ||
141 | /* This could be better if we didn't have to flush whole pages. */ | ||
142 | static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte) | ||
143 | { | ||
144 | unsigned long start; | ||
145 | unsigned long end; | ||
146 | |||
147 | start = (unsigned long)iopte & PAGE_MASK; | ||
148 | end = PAGE_ALIGN(start + niopte*sizeof(iopte_t)); | ||
149 | if (viking_mxcc_present) { | ||
150 | while(start < end) { | ||
151 | viking_mxcc_flush_page(start); | ||
152 | start += PAGE_SIZE; | ||
153 | } | ||
154 | } else if (viking_flush) { | ||
155 | while(start < end) { | ||
156 | viking_flush_page(start); | ||
157 | start += PAGE_SIZE; | ||
158 | } | ||
159 | } else { | ||
160 | while(start < end) { | ||
161 | __flush_page_to_ram(start); | ||
162 | start += PAGE_SIZE; | ||
163 | } | ||
164 | } | ||
165 | } | ||
166 | |||
167 | static u32 iommu_get_one(struct page *page, int npages, struct sbus_bus *sbus) | ||
168 | { | ||
169 | struct iommu_struct *iommu = sbus->iommu; | ||
170 | int ioptex; | ||
171 | iopte_t *iopte, *iopte0; | ||
172 | unsigned int busa, busa0; | ||
173 | int i; | ||
174 | |||
175 | /* page color = pfn of page */ | ||
176 | ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page)); | ||
177 | if (ioptex < 0) | ||
178 | panic("iommu out"); | ||
179 | busa0 = iommu->start + (ioptex << PAGE_SHIFT); | ||
180 | iopte0 = &iommu->page_table[ioptex]; | ||
181 | |||
182 | busa = busa0; | ||
183 | iopte = iopte0; | ||
184 | for (i = 0; i < npages; i++) { | ||
185 | iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM); | ||
186 | iommu_invalidate_page(iommu->regs, busa); | ||
187 | busa += PAGE_SIZE; | ||
188 | iopte++; | ||
189 | page++; | ||
190 | } | ||
191 | |||
192 | iommu_flush_iotlb(iopte0, npages); | ||
193 | |||
194 | return busa0; | ||
195 | } | ||
196 | |||
197 | static u32 iommu_get_scsi_one(char *vaddr, unsigned int len, | ||
198 | struct sbus_bus *sbus) | ||
199 | { | ||
200 | unsigned long off; | ||
201 | int npages; | ||
202 | struct page *page; | ||
203 | u32 busa; | ||
204 | |||
205 | off = (unsigned long)vaddr & ~PAGE_MASK; | ||
206 | npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT; | ||
207 | page = virt_to_page((unsigned long)vaddr & PAGE_MASK); | ||
208 | busa = iommu_get_one(page, npages, sbus); | ||
209 | return busa + off; | ||
210 | } | ||
211 | |||
212 | static __u32 iommu_get_scsi_one_noflush(char *vaddr, unsigned long len, struct sbus_bus *sbus) | ||
213 | { | ||
214 | return iommu_get_scsi_one(vaddr, len, sbus); | ||
215 | } | ||
216 | |||
217 | static __u32 iommu_get_scsi_one_gflush(char *vaddr, unsigned long len, struct sbus_bus *sbus) | ||
218 | { | ||
219 | flush_page_for_dma(0); | ||
220 | return iommu_get_scsi_one(vaddr, len, sbus); | ||
221 | } | ||
222 | |||
223 | static __u32 iommu_get_scsi_one_pflush(char *vaddr, unsigned long len, struct sbus_bus *sbus) | ||
224 | { | ||
225 | unsigned long page = ((unsigned long) vaddr) & PAGE_MASK; | ||
226 | |||
227 | while(page < ((unsigned long)(vaddr + len))) { | ||
228 | flush_page_for_dma(page); | ||
229 | page += PAGE_SIZE; | ||
230 | } | ||
231 | return iommu_get_scsi_one(vaddr, len, sbus); | ||
232 | } | ||
233 | |||
234 | static void iommu_get_scsi_sgl_noflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus) | ||
235 | { | ||
236 | int n; | ||
237 | |||
238 | while (sz != 0) { | ||
239 | --sz; | ||
240 | n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT; | ||
241 | sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset; | ||
242 | sg->dvma_length = (__u32) sg->length; | ||
243 | sg++; | ||
244 | } | ||
245 | } | ||
246 | |||
247 | static void iommu_get_scsi_sgl_gflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus) | ||
248 | { | ||
249 | int n; | ||
250 | |||
251 | flush_page_for_dma(0); | ||
252 | while (sz != 0) { | ||
253 | --sz; | ||
254 | n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT; | ||
255 | sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset; | ||
256 | sg->dvma_length = (__u32) sg->length; | ||
257 | sg++; | ||
258 | } | ||
259 | } | ||
260 | |||
261 | static void iommu_get_scsi_sgl_pflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus) | ||
262 | { | ||
263 | unsigned long page, oldpage = 0; | ||
264 | int n, i; | ||
265 | |||
266 | while(sz != 0) { | ||
267 | --sz; | ||
268 | |||
269 | n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT; | ||
270 | |||
271 | /* | ||
272 | * We expect unmapped highmem pages not to be in the cache. | ||
273 | * XXX Is this a good assumption? | ||
274 | * XXX What if someone else unmaps it here and races us? | ||
275 | */ | ||
276 | if ((page = (unsigned long) page_address(sg->page)) != 0) { | ||
277 | for (i = 0; i < n; i++) { | ||
278 | if (page != oldpage) { /* Already flushed? */ | ||
279 | flush_page_for_dma(page); | ||
280 | oldpage = page; | ||
281 | } | ||
282 | page += PAGE_SIZE; | ||
283 | } | ||
284 | } | ||
285 | |||
286 | sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset; | ||
287 | sg->dvma_length = (__u32) sg->length; | ||
288 | sg++; | ||
289 | } | ||
290 | } | ||
291 | |||
292 | static void iommu_release_one(u32 busa, int npages, struct sbus_bus *sbus) | ||
293 | { | ||
294 | struct iommu_struct *iommu = sbus->iommu; | ||
295 | int ioptex; | ||
296 | int i; | ||
297 | |||
298 | if (busa < iommu->start) | ||
299 | BUG(); | ||
300 | ioptex = (busa - iommu->start) >> PAGE_SHIFT; | ||
301 | for (i = 0; i < npages; i++) { | ||
302 | iopte_val(iommu->page_table[ioptex + i]) = 0; | ||
303 | iommu_invalidate_page(iommu->regs, busa); | ||
304 | busa += PAGE_SIZE; | ||
305 | } | ||
306 | bit_map_clear(&iommu->usemap, ioptex, npages); | ||
307 | } | ||
308 | |||
309 | static void iommu_release_scsi_one(__u32 vaddr, unsigned long len, struct sbus_bus *sbus) | ||
310 | { | ||
311 | unsigned long off; | ||
312 | int npages; | ||
313 | |||
314 | off = vaddr & ~PAGE_MASK; | ||
315 | npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT; | ||
316 | iommu_release_one(vaddr & PAGE_MASK, npages, sbus); | ||
317 | } | ||
318 | |||
319 | static void iommu_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus) | ||
320 | { | ||
321 | int n; | ||
322 | |||
323 | while(sz != 0) { | ||
324 | --sz; | ||
325 | |||
326 | n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT; | ||
327 | iommu_release_one(sg->dvma_address & PAGE_MASK, n, sbus); | ||
328 | sg->dvma_address = 0x21212121; | ||
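		/* Editor's note: 0x21212121 ("!!!!" in ASCII) is presumably a
		 * poison value, so stale use of an unmapped sg entry stands
		 * out in a dump. */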
329 | sg++; | ||
330 | } | ||
331 | } | ||
332 | |||
333 | #ifdef CONFIG_SBUS | ||
334 | static int iommu_map_dma_area(dma_addr_t *pba, unsigned long va, | ||
335 | unsigned long addr, int len) | ||
336 | { | ||
337 | unsigned long page, end; | ||
338 | struct iommu_struct *iommu = sbus_root->iommu; | ||
339 | iopte_t *iopte = iommu->page_table; | ||
340 | iopte_t *first; | ||
341 | int ioptex; | ||
342 | |||
343 | if ((va & ~PAGE_MASK) != 0) BUG(); | ||
344 | if ((addr & ~PAGE_MASK) != 0) BUG(); | ||
345 | if ((len & ~PAGE_MASK) != 0) BUG(); | ||
346 | |||
347 | /* page color = physical address */ | ||
348 | ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT, | ||
349 | addr >> PAGE_SHIFT); | ||
350 | if (ioptex < 0) | ||
351 | panic("iommu out"); | ||
352 | |||
353 | iopte += ioptex; | ||
354 | first = iopte; | ||
355 | end = addr + len; | ||
356 | while(addr < end) { | ||
357 | page = va; | ||
358 | { | ||
359 | pgd_t *pgdp; | ||
360 | pmd_t *pmdp; | ||
361 | pte_t *ptep; | ||
362 | |||
363 | if (viking_mxcc_present) | ||
364 | viking_mxcc_flush_page(page); | ||
365 | else if (viking_flush) | ||
366 | viking_flush_page(page); | ||
367 | else | ||
368 | __flush_page_to_ram(page); | ||
369 | |||
370 | pgdp = pgd_offset(&init_mm, addr); | ||
371 | pmdp = pmd_offset(pgdp, addr); | ||
372 | ptep = pte_offset_map(pmdp, addr); | ||
373 | |||
374 | set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot)); | ||
375 | } | ||
376 | iopte_val(*iopte++) = | ||
377 | MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc); | ||
378 | addr += PAGE_SIZE; | ||
379 | va += PAGE_SIZE; | ||
380 | } | ||
381 | /* P3: why do we need this? | ||
382 | * | ||
383 | * DAVEM: Because there are several aspects, none of which | ||
384 | * are handled by a single interface. Some cpus are | ||
385 | * completely not I/O DMA coherent, and some have | ||
386 | * virtually indexed caches. The driver DMA flushing | ||
387 | * methods handle the former case, but here during | ||
388 | * IOMMU page table modifications, and usage of non-cacheable | ||
389 | * cpu mappings of pages potentially in the cpu caches, we have | ||
390 | * to handle the latter case as well. | ||
391 | */ | ||
392 | flush_cache_all(); | ||
393 | iommu_flush_iotlb(first, len >> PAGE_SHIFT); | ||
394 | flush_tlb_all(); | ||
395 | iommu_invalidate(iommu->regs); | ||
396 | |||
397 | *pba = iommu->start + (ioptex << PAGE_SHIFT); | ||
398 | return 0; | ||
399 | } | ||
400 | |||
401 | static void iommu_unmap_dma_area(unsigned long busa, int len) | ||
402 | { | ||
403 | struct iommu_struct *iommu = sbus_root->iommu; | ||
404 | iopte_t *iopte = iommu->page_table; | ||
405 | unsigned long end; | ||
406 | int ioptex = (busa - iommu->start) >> PAGE_SHIFT; | ||
407 | |||
408 | if ((busa & ~PAGE_MASK) != 0) BUG(); | ||
409 | if ((len & ~PAGE_MASK) != 0) BUG(); | ||
410 | |||
411 | iopte += ioptex; | ||
412 | end = busa + len; | ||
413 | while (busa < end) { | ||
414 | iopte_val(*iopte++) = 0; | ||
415 | busa += PAGE_SIZE; | ||
416 | } | ||
417 | flush_tlb_all(); | ||
418 | iommu_invalidate(iommu->regs); | ||
419 | bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT); | ||
420 | } | ||
421 | |||
422 | static struct page *iommu_translate_dvma(unsigned long busa) | ||
423 | { | ||
424 | struct iommu_struct *iommu = sbus_root->iommu; | ||
425 | iopte_t *iopte = iommu->page_table; | ||
426 | |||
427 | iopte += ((busa - iommu->start) >> PAGE_SHIFT); | ||
428 | return pfn_to_page((iopte_val(*iopte) & IOPTE_PAGE) >> (PAGE_SHIFT-4)); | ||
429 | } | ||
430 | #endif | ||
431 | |||
432 | static char *iommu_lockarea(char *vaddr, unsigned long len) | ||
433 | { | ||
434 | return vaddr; | ||
435 | } | ||
436 | |||
437 | static void iommu_unlockarea(char *vaddr, unsigned long len) | ||
438 | { | ||
439 | } | ||
440 | |||
441 | void __init ld_mmu_iommu(void) | ||
442 | { | ||
443 | viking_flush = (BTFIXUPVAL_CALL(flush_page_for_dma) == (unsigned long)viking_flush_page); | ||
444 | BTFIXUPSET_CALL(mmu_lockarea, iommu_lockarea, BTFIXUPCALL_RETO0); | ||
445 | BTFIXUPSET_CALL(mmu_unlockarea, iommu_unlockarea, BTFIXUPCALL_NOP); | ||
446 | |||
447 | if (!BTFIXUPVAL_CALL(flush_page_for_dma)) { | ||
448 | /* IO coherent chip */ | ||
449 | BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_noflush, BTFIXUPCALL_RETO0); | ||
450 | BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_noflush, BTFIXUPCALL_NORM); | ||
451 | } else if (flush_page_for_dma_global) { | ||
452 | /* flush_page_for_dma flushes everything, regardless of which page it is given */ | ||
453 | BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_gflush, BTFIXUPCALL_NORM); | ||
454 | BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_gflush, BTFIXUPCALL_NORM); | ||
455 | } else { | ||
456 | BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_pflush, BTFIXUPCALL_NORM); | ||
457 | BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_pflush, BTFIXUPCALL_NORM); | ||
458 | } | ||
459 | BTFIXUPSET_CALL(mmu_release_scsi_one, iommu_release_scsi_one, BTFIXUPCALL_NORM); | ||
460 | BTFIXUPSET_CALL(mmu_release_scsi_sgl, iommu_release_scsi_sgl, BTFIXUPCALL_NORM); | ||
461 | |||
462 | #ifdef CONFIG_SBUS | ||
463 | BTFIXUPSET_CALL(mmu_map_dma_area, iommu_map_dma_area, BTFIXUPCALL_NORM); | ||
464 | BTFIXUPSET_CALL(mmu_unmap_dma_area, iommu_unmap_dma_area, BTFIXUPCALL_NORM); | ||
465 | BTFIXUPSET_CALL(mmu_translate_dvma, iommu_translate_dvma, BTFIXUPCALL_NORM); | ||
466 | #endif | ||
467 | |||
468 | if (viking_mxcc_present || srmmu_modtype == HyperSparc) { | ||
469 | dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV); | ||
470 | ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID; | ||
471 | } else { | ||
472 | dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV); | ||
473 | ioperm_noc = IOPTE_WRITE | IOPTE_VALID; | ||
474 | } | ||
475 | } | ||
diff --git a/arch/sparc/mm/loadmmu.c b/arch/sparc/mm/loadmmu.c new file mode 100644 index 000000000000..e9f9571601ba --- /dev/null +++ b/arch/sparc/mm/loadmmu.c | |||
@@ -0,0 +1,46 @@ | |||
1 | /* $Id: loadmmu.c,v 1.56 2000/02/08 20:24:21 davem Exp $ | ||
2 | * loadmmu.c: This code loads up all the mm function pointers once the | ||
3 | * machine type has been determined. It also sets the static | ||
4 | * mmu values such as PAGE_NONE, etc. | ||
5 | * | ||
6 | * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) | ||
7 | * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | ||
8 | */ | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/mm.h> | ||
12 | #include <linux/init.h> | ||
13 | |||
14 | #include <asm/system.h> | ||
15 | #include <asm/page.h> | ||
16 | #include <asm/pgtable.h> | ||
17 | #include <asm/a.out.h> | ||
18 | #include <asm/mmu_context.h> | ||
19 | #include <asm/oplib.h> | ||
20 | |||
21 | struct ctx_list *ctx_list_pool; | ||
22 | struct ctx_list ctx_free; | ||
23 | struct ctx_list ctx_used; | ||
24 | |||
25 | unsigned int pg_iobits; | ||
26 | |||
27 | extern void ld_mmu_sun4c(void); | ||
28 | extern void ld_mmu_srmmu(void); | ||
29 | |||
30 | void __init load_mmu(void) | ||
31 | { | ||
32 | switch(sparc_cpu_model) { | ||
33 | case sun4c: | ||
34 | case sun4: | ||
35 | ld_mmu_sun4c(); | ||
36 | break; | ||
37 | case sun4m: | ||
38 | case sun4d: | ||
39 | ld_mmu_srmmu(); | ||
40 | break; | ||
41 | default: | ||
42 | prom_printf("load_mmu: %d unsupported\n", (int)sparc_cpu_model); | ||
43 | prom_halt(); | ||
44 | } | ||
45 | btfixup(); | ||
46 | } | ||
diff --git a/arch/sparc/mm/nosrmmu.c b/arch/sparc/mm/nosrmmu.c new file mode 100644 index 000000000000..9e215659697e --- /dev/null +++ b/arch/sparc/mm/nosrmmu.c | |||
@@ -0,0 +1,59 @@ | |||
1 | /* $Id: nosrmmu.c,v 1.5 1999/11/19 04:11:54 davem Exp $ | ||
2 | * nosrmmu.c: This file is a bunch of dummies for sun4 compiles, | ||
3 | * so that it does not need srmmu and we avoid ifdefs. | ||
4 | * | ||
5 | * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | ||
6 | */ | ||
7 | |||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/mm.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <asm/mbus.h> | ||
12 | #include <asm/sbus.h> | ||
13 | |||
14 | static char shouldnothappen[] __initdata = "SUN4 kernel can only run on SUN4\n"; | ||
15 | |||
16 | enum mbus_module srmmu_modtype; | ||
17 | void *srmmu_nocache_pool; | ||
18 | |||
19 | int vac_cache_size = 0; | ||
20 | |||
21 | static void __init should_not_happen(void) | ||
22 | { | ||
23 | prom_printf(shouldnothappen); | ||
24 | prom_halt(); | ||
25 | } | ||
26 | |||
27 | void __init srmmu_frob_mem_map(unsigned long start_mem) | ||
28 | { | ||
29 | should_not_happen(); | ||
30 | } | ||
31 | |||
32 | unsigned long __init srmmu_paging_init(unsigned long start_mem, unsigned long end_mem) | ||
33 | { | ||
34 | should_not_happen(); | ||
35 | return 0; | ||
36 | } | ||
37 | |||
38 | void __init ld_mmu_srmmu(void) | ||
39 | { | ||
40 | should_not_happen(); | ||
41 | } | ||
42 | |||
43 | void srmmu_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_type, int rdonly) | ||
44 | { | ||
45 | } | ||
46 | |||
47 | void srmmu_unmapioaddr(unsigned long virt_addr) | ||
48 | { | ||
49 | } | ||
50 | |||
51 | __u32 iounit_map_dma_init(struct sbus_bus *sbus, int size) | ||
52 | { | ||
53 | return 0; | ||
54 | } | ||
55 | |||
56 | __u32 iounit_map_dma_page(__u32 vaddr, void *addr, struct sbus_bus *sbus) | ||
57 | { | ||
58 | return 0; | ||
59 | } | ||
diff --git a/arch/sparc/mm/nosun4c.c b/arch/sparc/mm/nosun4c.c new file mode 100644 index 000000000000..ea2e2105341d --- /dev/null +++ b/arch/sparc/mm/nosun4c.c | |||
@@ -0,0 +1,77 @@ | |||
1 | /* $Id: nosun4c.c,v 1.3 2000/02/14 04:52:36 jj Exp $ | ||
2 | * nosun4c.c: This file is a bunch of dummies for SMP compiles, | ||
3 | * so that it does not need sun4c and avoids ifdefs. | ||
4 | * | ||
5 | * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | ||
6 | */ | ||
7 | |||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/mm.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <asm/pgtable.h> | ||
12 | |||
13 | static char shouldnothappen[] __initdata = "32bit SMP kernel only supports sun4m and sun4d\n"; | ||
14 | |||
15 | /* Dummies */ | ||
16 | struct sun4c_mmu_ring { | ||
17 | unsigned long xxx1[3]; | ||
18 | unsigned char xxx2[2]; | ||
19 | int xxx3; | ||
20 | }; | ||
21 | struct sun4c_mmu_ring sun4c_kernel_ring; | ||
22 | struct sun4c_mmu_ring sun4c_kfree_ring; | ||
23 | unsigned long sun4c_kernel_faults; | ||
24 | unsigned long *sun4c_memerr_reg; | ||
25 | |||
26 | static void __init should_not_happen(void) | ||
27 | { | ||
28 | prom_printf(shouldnothappen); | ||
29 | prom_halt(); | ||
30 | } | ||
31 | |||
32 | unsigned long __init sun4c_paging_init(unsigned long start_mem, unsigned long end_mem) | ||
33 | { | ||
34 | should_not_happen(); | ||
35 | return 0; | ||
36 | } | ||
37 | |||
38 | void __init ld_mmu_sun4c(void) | ||
39 | { | ||
40 | should_not_happen(); | ||
41 | } | ||
42 | |||
43 | void sun4c_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_type, int rdonly) | ||
44 | { | ||
45 | } | ||
46 | |||
47 | void sun4c_unmapioaddr(unsigned long virt_addr) | ||
48 | { | ||
49 | } | ||
50 | |||
51 | void sun4c_complete_all_stores(void) | ||
52 | { | ||
53 | } | ||
54 | |||
55 | pte_t *sun4c_pte_offset(pmd_t * dir, unsigned long address) | ||
56 | { | ||
57 | return NULL; | ||
58 | } | ||
59 | |||
60 | pte_t *sun4c_pte_offset_kernel(pmd_t *dir, unsigned long address) | ||
61 | { | ||
62 | return NULL; | ||
63 | } | ||
64 | |||
65 | void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) | ||
66 | { | ||
67 | } | ||
68 | |||
69 | void __init sun4c_probe_vac(void) | ||
70 | { | ||
71 | should_not_happen(); | ||
72 | } | ||
73 | |||
74 | void __init sun4c_probe_memerr_reg(void) | ||
75 | { | ||
76 | should_not_happen(); | ||
77 | } | ||
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c new file mode 100644 index 000000000000..c89a803cbc20 --- /dev/null +++ b/arch/sparc/mm/srmmu.c | |||
@@ -0,0 +1,2274 @@ | |||
1 | /* | ||
2 | * srmmu.c: SRMMU specific routines for memory management. | ||
3 | * | ||
4 | * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) | ||
5 | * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com) | ||
6 | * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) | ||
7 | * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | ||
8 | * Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org) | ||
9 | */ | ||
10 | |||
11 | #include <linux/config.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/mm.h> | ||
14 | #include <linux/slab.h> | ||
15 | #include <linux/vmalloc.h> | ||
16 | #include <linux/pagemap.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/spinlock.h> | ||
19 | #include <linux/bootmem.h> | ||
20 | #include <linux/fs.h> | ||
21 | #include <linux/seq_file.h> | ||
22 | |||
23 | #include <asm/bitext.h> | ||
24 | #include <asm/page.h> | ||
25 | #include <asm/pgalloc.h> | ||
26 | #include <asm/pgtable.h> | ||
27 | #include <asm/io.h> | ||
28 | #include <asm/kdebug.h> | ||
29 | #include <asm/vaddrs.h> | ||
30 | #include <asm/traps.h> | ||
31 | #include <asm/smp.h> | ||
32 | #include <asm/mbus.h> | ||
33 | #include <asm/cache.h> | ||
34 | #include <asm/oplib.h> | ||
35 | #include <asm/sbus.h> | ||
36 | #include <asm/asi.h> | ||
37 | #include <asm/msi.h> | ||
38 | #include <asm/a.out.h> | ||
39 | #include <asm/mmu_context.h> | ||
40 | #include <asm/io-unit.h> | ||
41 | #include <asm/cacheflush.h> | ||
42 | #include <asm/tlbflush.h> | ||
43 | |||
44 | /* Now the cpu specific definitions. */ | ||
45 | #include <asm/viking.h> | ||
46 | #include <asm/mxcc.h> | ||
47 | #include <asm/ross.h> | ||
48 | #include <asm/tsunami.h> | ||
49 | #include <asm/swift.h> | ||
50 | #include <asm/turbosparc.h> | ||
51 | |||
52 | #include <asm/btfixup.h> | ||
53 | |||
54 | enum mbus_module srmmu_modtype; | ||
55 | unsigned int hwbug_bitmask; | ||
56 | int vac_cache_size; | ||
57 | int vac_line_size; | ||
58 | |||
59 | extern struct resource sparc_iomap; | ||
60 | |||
61 | extern unsigned long last_valid_pfn; | ||
62 | |||
63 | extern unsigned long page_kernel; | ||
64 | |||
65 | pgd_t *srmmu_swapper_pg_dir; | ||
66 | |||
67 | #ifdef CONFIG_SMP | ||
68 | #define FLUSH_BEGIN(mm) | ||
69 | #define FLUSH_END | ||
70 | #else | ||
71 | #define FLUSH_BEGIN(mm) if((mm)->context != NO_CONTEXT) { | ||
72 | #define FLUSH_END } | ||
73 | #endif | ||
74 | |||
75 | BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long) | ||
76 | #define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page) | ||
77 | |||
78 | int flush_page_for_dma_global = 1; | ||
79 | |||
80 | #ifdef CONFIG_SMP | ||
81 | BTFIXUPDEF_CALL(void, local_flush_page_for_dma, unsigned long) | ||
82 | #define local_flush_page_for_dma(page) BTFIXUP_CALL(local_flush_page_for_dma)(page) | ||
83 | #endif | ||
84 | |||
85 | char *srmmu_name; | ||
86 | |||
87 | ctxd_t *srmmu_ctx_table_phys; | ||
88 | ctxd_t *srmmu_context_table; | ||
89 | |||
90 | int viking_mxcc_present; | ||
91 | static DEFINE_SPINLOCK(srmmu_context_spinlock); | ||
92 | |||
93 | int is_hypersparc; | ||
94 | |||
95 | /* | ||
96 | * In general all page table modifications should use the V8 atomic | ||
97 | * swap instruction. This ensures the mmu and the cpu are in sync | ||
98 | * with respect to ref/mod bits in the page tables. | ||
99 | */ | ||
100 | static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value) | ||
101 | { | ||
102 | __asm__ __volatile__("swap [%2], %0" : "=&r" (value) : "0" (value), "r" (addr)); | ||
103 | return value; | ||
104 | } | ||
105 | |||
106 | static inline void srmmu_set_pte(pte_t *ptep, pte_t pteval) | ||
107 | { | ||
108 | srmmu_swap((unsigned long *)ptep, pte_val(pteval)); | ||
109 | } | ||
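Since srmmu_swap() returns the previous word, a caller can install a new entry and learn what it replaced in a single atomic step. A minimal sketch under that reading (sketch_install_pte is hypothetical, not part of this file):

	/* Hypothetical sketch: atomically install a PTE and report whether
	 * a valid mapping was replaced, using the SRMMU_ET_* encoding from
	 * pgtable.h.
	 */
	static inline int sketch_install_pte(pte_t *ptep, pte_t newval)
	{
		unsigned long old = srmmu_swap((unsigned long *)ptep,
					       pte_val(newval));
		return (old & SRMMU_ET_MASK) == SRMMU_ET_PTE;
	}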
110 | |||
111 | /* The very generic SRMMU page table operations. */ | ||
112 | static inline int srmmu_device_memory(unsigned long x) | ||
113 | { | ||
114 | return ((x & 0xF0000000) != 0); | ||
115 | } | ||
116 | |||
117 | int srmmu_cache_pagetables; | ||
118 | |||
119 | /* these will be initialized in srmmu_nocache_calcsize() */ | ||
120 | unsigned long srmmu_nocache_size; | ||
121 | unsigned long srmmu_nocache_end; | ||
122 | |||
123 | /* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */ | ||
124 | #define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4) | ||
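With the usual 4 KB sparc32 page (PAGE_SHIFT = 12) the shift works out to 8, so one bitmap bit covers 2^8 = 256 bytes of nocache, which at 4 bytes per entry is exactly the 64 PTEs the comment describes.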
125 | |||
126 | /* The context table is a nocache user with the biggest alignment needs. */ | ||
127 | #define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS) | ||
128 | |||
129 | void *srmmu_nocache_pool; | ||
130 | void *srmmu_nocache_bitmap; | ||
131 | static struct bit_map srmmu_nocache_map; | ||
132 | |||
133 | static unsigned long srmmu_pte_pfn(pte_t pte) | ||
134 | { | ||
135 | if (srmmu_device_memory(pte_val(pte))) { | ||
136 | /* Just return something that will cause | ||
137 | * pfn_valid() to return false. This makes | ||
138 | * copy_one_pte() just copy the PTE | ||
139 | * over directly. | ||
140 | */ | ||
141 | return ~0UL; | ||
142 | } | ||
143 | return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4); | ||
144 | } | ||
145 | |||
146 | static struct page *srmmu_pmd_page(pmd_t pmd) | ||
147 | { | ||
148 | |||
149 | if (srmmu_device_memory(pmd_val(pmd))) | ||
150 | BUG(); | ||
151 | return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4)); | ||
152 | } | ||
153 | |||
154 | static inline unsigned long srmmu_pgd_page(pgd_t pgd) | ||
155 | { return srmmu_device_memory(pgd_val(pgd))?~0:(unsigned long)__nocache_va((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); } | ||
156 | |||
157 | |||
158 | static inline int srmmu_pte_none(pte_t pte) | ||
159 | { return !(pte_val(pte) & 0xFFFFFFF); } | ||
160 | |||
161 | static inline int srmmu_pte_present(pte_t pte) | ||
162 | { return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE); } | ||
163 | |||
164 | static inline int srmmu_pte_read(pte_t pte) | ||
165 | { return !(pte_val(pte) & SRMMU_NOREAD); } | ||
166 | |||
167 | static inline void srmmu_pte_clear(pte_t *ptep) | ||
168 | { srmmu_set_pte(ptep, __pte(0)); } | ||
169 | |||
170 | static inline int srmmu_pmd_none(pmd_t pmd) | ||
171 | { return !(pmd_val(pmd) & 0xFFFFFFF); } | ||
172 | |||
173 | static inline int srmmu_pmd_bad(pmd_t pmd) | ||
174 | { return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; } | ||
175 | |||
176 | static inline int srmmu_pmd_present(pmd_t pmd) | ||
177 | { return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); } | ||
178 | |||
179 | static inline void srmmu_pmd_clear(pmd_t *pmdp) { | ||
180 | int i; | ||
181 | for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) | ||
182 | srmmu_set_pte((pte_t *)&pmdp->pmdv[i], __pte(0)); | ||
183 | } | ||
184 | |||
185 | static inline int srmmu_pgd_none(pgd_t pgd) | ||
186 | { return !(pgd_val(pgd) & 0xFFFFFFF); } | ||
187 | |||
188 | static inline int srmmu_pgd_bad(pgd_t pgd) | ||
189 | { return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; } | ||
190 | |||
191 | static inline int srmmu_pgd_present(pgd_t pgd) | ||
192 | { return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); } | ||
193 | |||
194 | static inline void srmmu_pgd_clear(pgd_t * pgdp) | ||
195 | { srmmu_set_pte((pte_t *)pgdp, __pte(0)); } | ||
196 | |||
197 | static inline pte_t srmmu_pte_wrprotect(pte_t pte) | ||
198 | { return __pte(pte_val(pte) & ~SRMMU_WRITE);} | ||
199 | |||
200 | static inline pte_t srmmu_pte_mkclean(pte_t pte) | ||
201 | { return __pte(pte_val(pte) & ~SRMMU_DIRTY);} | ||
202 | |||
203 | static inline pte_t srmmu_pte_mkold(pte_t pte) | ||
204 | { return __pte(pte_val(pte) & ~SRMMU_REF);} | ||
205 | |||
206 | static inline pte_t srmmu_pte_mkwrite(pte_t pte) | ||
207 | { return __pte(pte_val(pte) | SRMMU_WRITE);} | ||
208 | |||
209 | static inline pte_t srmmu_pte_mkdirty(pte_t pte) | ||
210 | { return __pte(pte_val(pte) | SRMMU_DIRTY);} | ||
211 | |||
212 | static inline pte_t srmmu_pte_mkyoung(pte_t pte) | ||
213 | { return __pte(pte_val(pte) | SRMMU_REF);} | ||
214 | |||
215 | /* | ||
216 | * Conversion functions: convert a page and protection to a page entry, | ||
217 | * and a page entry and page directory to the page they refer to. | ||
218 | */ | ||
219 | static pte_t srmmu_mk_pte(struct page *page, pgprot_t pgprot) | ||
220 | { return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot)); } | ||
221 | |||
222 | static pte_t srmmu_mk_pte_phys(unsigned long page, pgprot_t pgprot) | ||
223 | { return __pte(((page) >> 4) | pgprot_val(pgprot)); } | ||
224 | |||
225 | static pte_t srmmu_mk_pte_io(unsigned long page, pgprot_t pgprot, int space) | ||
226 | { return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot)); } | ||
227 | |||
228 | /* XXX should we hyper_flush_whole_icache here - Anton */ | ||
229 | static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp) | ||
230 | { srmmu_set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); } | ||
231 | |||
232 | static inline void srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp) | ||
233 | { srmmu_set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pmdp) >> 4))); } | ||
234 | |||
235 | static void srmmu_pmd_set(pmd_t *pmdp, pte_t *ptep) | ||
236 | { | ||
237 | unsigned long ptp; /* Physical address, shifted right by 4 */ | ||
238 | int i; | ||
239 | |||
240 | ptp = __nocache_pa((unsigned long) ptep) >> 4; | ||
241 | for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) { | ||
242 | srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp); | ||
243 | ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4); | ||
244 | } | ||
245 | } | ||
246 | |||
247 | static void srmmu_pmd_populate(pmd_t *pmdp, struct page *ptep) | ||
248 | { | ||
249 | unsigned long ptp; /* Physical address, shifted right by 4 */ | ||
250 | int i; | ||
251 | |||
252 | ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4); /* watch for overflow */ | ||
253 | for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) { | ||
254 | srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp); | ||
255 | ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4); | ||
256 | } | ||
257 | } | ||
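In both helpers above, one software pmd entry fans out into PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE consecutive hardware page-table pointers: Linux's software tables are larger than the hardware's, so each successive pmdv[i] slot is advanced by SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) bytes (in >> 4 PTP units) further into the same PTE page.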
258 | |||
259 | static inline pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot) | ||
260 | { return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot)); } | ||
261 | |||
262 | /* to find an entry in a top-level page table... */ | ||
263 | extern inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address) | ||
264 | { return mm->pgd + (address >> SRMMU_PGDIR_SHIFT); } | ||
265 | |||
266 | /* Find an entry in the second-level page table.. */ | ||
267 | static inline pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address) | ||
268 | { | ||
269 | return (pmd_t *) srmmu_pgd_page(*dir) + | ||
270 | ((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1)); | ||
271 | } | ||
272 | |||
273 | /* Find an entry in the third-level page table.. */ | ||
274 | static inline pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address) | ||
275 | { | ||
276 | void *pte; | ||
277 | |||
278 | pte = __nocache_va((dir->pmdv[0] & SRMMU_PTD_PMASK) << 4); | ||
279 | return (pte_t *) pte + | ||
280 | ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)); | ||
281 | } | ||
282 | |||
283 | static unsigned long srmmu_swp_type(swp_entry_t entry) | ||
284 | { | ||
285 | return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK; | ||
286 | } | ||
287 | |||
288 | static unsigned long srmmu_swp_offset(swp_entry_t entry) | ||
289 | { | ||
290 | return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK; | ||
291 | } | ||
292 | |||
293 | static swp_entry_t srmmu_swp_entry(unsigned long type, unsigned long offset) | ||
294 | { | ||
295 | return (swp_entry_t) { | ||
296 | (type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT | ||
297 | | (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT }; | ||
298 | } | ||
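A quick round-trip of the encoding (the values are only illustrative; the field widths come from the SRMMU_SWP_* constants in pgtable.h):

	swp_entry_t e = srmmu_swp_entry(2, 0x1234);
	/* srmmu_swp_type(e) == 2 and srmmu_swp_offset(e) == 0x1234,
	 * provided both values fit within their respective masks. */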
299 | |||
300 | /* | ||
301 | * size: bytes to allocate in the nocache area. | ||
302 | * align: bytes, number to align at. | ||
303 | * Returns the virtual address of the allocated area. | ||
304 | */ | ||
305 | static unsigned long __srmmu_get_nocache(int size, int align) | ||
306 | { | ||
307 | int offset; | ||
308 | |||
309 | if (size < SRMMU_NOCACHE_BITMAP_SHIFT) { | ||
310 | printk("Size 0x%x too small for nocache request\n", size); | ||
311 | size = SRMMU_NOCACHE_BITMAP_SHIFT; | ||
312 | } | ||
313 | if (size & (SRMMU_NOCACHE_BITMAP_SHIFT-1)) { | ||
314 | printk("Size 0x%x unaligned int nocache request\n", size); | ||
315 | size += SRMMU_NOCACHE_BITMAP_SHIFT-1; | ||
316 | } | ||
317 | BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX); | ||
318 | |||
319 | offset = bit_map_string_get(&srmmu_nocache_map, | ||
320 | size >> SRMMU_NOCACHE_BITMAP_SHIFT, | ||
321 | align >> SRMMU_NOCACHE_BITMAP_SHIFT); | ||
322 | if (offset == -1) { | ||
323 | printk("srmmu: out of nocache %d: %d/%d\n", | ||
324 | size, (int) srmmu_nocache_size, | ||
325 | srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT); | ||
326 | return 0; | ||
327 | } | ||
328 | |||
329 | return (SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT)); | ||
330 | } | ||
331 | |||
332 | inline unsigned long srmmu_get_nocache(int size, int align) | ||
333 | { | ||
334 | unsigned long tmp; | ||
335 | |||
336 | tmp = __srmmu_get_nocache(size, align); | ||
337 | |||
338 | if (tmp) | ||
339 | memset((void *)tmp, 0, size); | ||
340 | |||
341 | return tmp; | ||
342 | } | ||
343 | |||
344 | void srmmu_free_nocache(unsigned long vaddr, int size) | ||
345 | { | ||
346 | int offset; | ||
347 | |||
348 | if (vaddr < SRMMU_NOCACHE_VADDR) { | ||
349 | printk("Vaddr %lx is smaller than nocache base 0x%lx\n", | ||
350 | vaddr, (unsigned long)SRMMU_NOCACHE_VADDR); | ||
351 | BUG(); | ||
352 | } | ||
353 | if (vaddr+size > srmmu_nocache_end) { | ||
354 | printk("Vaddr %lx is bigger than nocache end 0x%lx\n", | ||
355 | vaddr, srmmu_nocache_end); | ||
356 | BUG(); | ||
357 | } | ||
358 | if (size & (size-1)) { | ||
359 | printk("Size 0x%x is not a power of 2\n", size); | ||
360 | BUG(); | ||
361 | } | ||
362 | if (size < SRMMU_NOCACHE_BITMAP_SHIFT) { | ||
363 | printk("Size 0x%x is too small\n", size); | ||
364 | BUG(); | ||
365 | } | ||
366 | if (vaddr & (size-1)) { | ||
367 | printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size); | ||
368 | BUG(); | ||
369 | } | ||
370 | |||
371 | offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT; | ||
372 | size = size >> SRMMU_NOCACHE_BITMAP_SHIFT; | ||
373 | |||
374 | bit_map_clear(&srmmu_nocache_map, offset, size); | ||
375 | } | ||
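Taken together, these checks mean a caller must free exactly what it allocated: a power-of-two size no smaller than the bitmap granularity, at an address aligned to that size. A minimal usage sketch, mirroring how the pgd allocations later in this file pair up:

	unsigned long p;

	p = srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
	if (p)
		srmmu_free_nocache(p, SRMMU_PGD_TABLE_SIZE);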
376 | |||
377 | void srmmu_early_allocate_ptable_skeleton(unsigned long start, unsigned long end); | ||
378 | |||
379 | extern unsigned long probe_memory(void); /* in fault.c */ | ||
380 | |||
381 | /* | ||
382 | * Reserve nocache dynamically proportionally to the amount of | ||
383 | * system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002 | ||
384 | */ | ||
385 | void srmmu_nocache_calcsize(void) | ||
386 | { | ||
387 | unsigned long sysmemavail = probe_memory() / 1024; | ||
388 | int srmmu_nocache_npages; | ||
389 | |||
390 | srmmu_nocache_npages = | ||
391 | sysmemavail / SRMMU_NOCACHE_ALCRATIO / 1024 * 256; | ||
392 | |||
393 | /* P3 XXX The 4x overuse: corroborated by /proc/meminfo. */ | ||
394 | // if (srmmu_nocache_npages < 256) srmmu_nocache_npages = 256; | ||
395 | if (srmmu_nocache_npages < SRMMU_MIN_NOCACHE_PAGES) | ||
396 | srmmu_nocache_npages = SRMMU_MIN_NOCACHE_PAGES; | ||
397 | |||
398 | /* anything above 1280 blows up */ | ||
399 | if (srmmu_nocache_npages > SRMMU_MAX_NOCACHE_PAGES) | ||
400 | srmmu_nocache_npages = SRMMU_MAX_NOCACHE_PAGES; | ||
401 | |||
402 | srmmu_nocache_size = srmmu_nocache_npages * PAGE_SIZE; | ||
403 | srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size; | ||
404 | } | ||
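Unwinding the arithmetic: sysmemavail is RAM in KB, so srmmu_nocache_npages comes out to roughly RAM_in_MB * 256 / SRMMU_NOCACHE_ALCRATIO, clamped between SRMMU_MIN_NOCACHE_PAGES and SRMMU_MAX_NOCACHE_PAGES before being converted to bytes.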
405 | |||
406 | void srmmu_nocache_init(void) | ||
407 | { | ||
408 | unsigned int bitmap_bits; | ||
409 | pgd_t *pgd; | ||
410 | pmd_t *pmd; | ||
411 | pte_t *pte; | ||
412 | unsigned long paddr, vaddr; | ||
413 | unsigned long pteval; | ||
414 | |||
415 | bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT; | ||
416 | |||
417 | srmmu_nocache_pool = __alloc_bootmem(srmmu_nocache_size, | ||
418 | SRMMU_NOCACHE_ALIGN_MAX, 0UL); | ||
419 | memset(srmmu_nocache_pool, 0, srmmu_nocache_size); | ||
420 | |||
421 | srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL); | ||
422 | bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits); | ||
423 | |||
424 | srmmu_swapper_pg_dir = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE); | ||
425 | memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE); | ||
426 | init_mm.pgd = srmmu_swapper_pg_dir; | ||
427 | |||
428 | srmmu_early_allocate_ptable_skeleton(SRMMU_NOCACHE_VADDR, srmmu_nocache_end); | ||
429 | |||
430 | paddr = __pa((unsigned long)srmmu_nocache_pool); | ||
431 | vaddr = SRMMU_NOCACHE_VADDR; | ||
432 | |||
433 | while (vaddr < srmmu_nocache_end) { | ||
434 | pgd = pgd_offset_k(vaddr); | ||
435 | pmd = srmmu_pmd_offset(__nocache_fix(pgd), vaddr); | ||
436 | pte = srmmu_pte_offset(__nocache_fix(pmd), vaddr); | ||
437 | |||
438 | pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV); | ||
439 | |||
440 | if (srmmu_cache_pagetables) | ||
441 | pteval |= SRMMU_CACHE; | ||
442 | |||
443 | srmmu_set_pte(__nocache_fix(pte), __pte(pteval)); | ||
444 | |||
445 | vaddr += PAGE_SIZE; | ||
446 | paddr += PAGE_SIZE; | ||
447 | } | ||
448 | |||
449 | flush_cache_all(); | ||
450 | flush_tlb_all(); | ||
451 | } | ||
452 | |||
453 | static inline pgd_t *srmmu_get_pgd_fast(void) | ||
454 | { | ||
455 | pgd_t *pgd = NULL; | ||
456 | |||
457 | pgd = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE); | ||
458 | if (pgd) { | ||
459 | pgd_t *init = pgd_offset_k(0); | ||
460 | memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); | ||
461 | memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD, | ||
462 | (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); | ||
463 | } | ||
464 | |||
465 | return pgd; | ||
466 | } | ||
467 | |||
468 | static void srmmu_free_pgd_fast(pgd_t *pgd) | ||
469 | { | ||
470 | srmmu_free_nocache((unsigned long)pgd, SRMMU_PGD_TABLE_SIZE); | ||
471 | } | ||
472 | |||
473 | static pmd_t *srmmu_pmd_alloc_one(struct mm_struct *mm, unsigned long address) | ||
474 | { | ||
475 | return (pmd_t *)srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE); | ||
476 | } | ||
477 | |||
478 | static void srmmu_pmd_free(pmd_t * pmd) | ||
479 | { | ||
480 | srmmu_free_nocache((unsigned long)pmd, SRMMU_PMD_TABLE_SIZE); | ||
481 | } | ||
482 | |||
483 | /* | ||
484 | * Hardware needs alignment to 256 only, but we align to whole page size | ||
485 | * to reduce fragmentation problems due to the buddy principle. | ||
486 | * XXX Provide actual fragmentation statistics in /proc. | ||
487 | * | ||
488 | * Alignments up to the page size are the same for physical and virtual | ||
489 | * addresses of the nocache area. | ||
490 | */ | ||
491 | static pte_t * | ||
492 | srmmu_pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) | ||
493 | { | ||
494 | return (pte_t *)srmmu_get_nocache(PTE_SIZE, PTE_SIZE); | ||
495 | } | ||
496 | |||
497 | static struct page * | ||
498 | srmmu_pte_alloc_one(struct mm_struct *mm, unsigned long address) | ||
499 | { | ||
500 | unsigned long pte; | ||
501 | |||
502 | if ((pte = (unsigned long)srmmu_pte_alloc_one_kernel(mm, address)) == 0) | ||
503 | return NULL; | ||
504 | return pfn_to_page( __nocache_pa(pte) >> PAGE_SHIFT ); | ||
505 | } | ||
506 | |||
507 | static void srmmu_free_pte_fast(pte_t *pte) | ||
508 | { | ||
509 | srmmu_free_nocache((unsigned long)pte, PTE_SIZE); | ||
510 | } | ||
511 | |||
512 | static void srmmu_pte_free(struct page *pte) | ||
513 | { | ||
514 | unsigned long p; | ||
515 | |||
516 | p = (unsigned long)page_address(pte); /* Cached address (for test) */ | ||
517 | if (p == 0) | ||
518 | BUG(); | ||
519 | p = page_to_pfn(pte) << PAGE_SHIFT; /* Physical address */ | ||
520 | p = (unsigned long) __nocache_va(p); /* Nocached virtual */ | ||
521 | srmmu_free_nocache(p, PTE_SIZE); | ||
522 | } | ||
523 | |||
524 | /* Allocate an MMU context for mm; if none are free, steal the | ||
525 | * oldest used one after flushing its cache and TLB state. */ | ||
526 | static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm) | ||
527 | { | ||
528 | struct ctx_list *ctxp; | ||
529 | |||
530 | ctxp = ctx_free.next; | ||
531 | if(ctxp != &ctx_free) { | ||
532 | remove_from_ctx_list(ctxp); | ||
533 | add_to_used_ctxlist(ctxp); | ||
534 | mm->context = ctxp->ctx_number; | ||
535 | ctxp->ctx_mm = mm; | ||
536 | return; | ||
537 | } | ||
538 | ctxp = ctx_used.next; | ||
539 | if(ctxp->ctx_mm == old_mm) | ||
540 | ctxp = ctxp->next; | ||
541 | if(ctxp == &ctx_used) | ||
542 | panic("out of mmu contexts"); | ||
543 | flush_cache_mm(ctxp->ctx_mm); | ||
544 | flush_tlb_mm(ctxp->ctx_mm); | ||
545 | remove_from_ctx_list(ctxp); | ||
546 | add_to_used_ctxlist(ctxp); | ||
547 | ctxp->ctx_mm->context = NO_CONTEXT; | ||
548 | ctxp->ctx_mm = mm; | ||
549 | mm->context = ctxp->ctx_number; | ||
550 | } | ||
551 | |||
552 | static inline void free_context(int context) | ||
553 | { | ||
554 | struct ctx_list *ctx_old; | ||
555 | |||
556 | ctx_old = ctx_list_pool + context; | ||
557 | remove_from_ctx_list(ctx_old); | ||
558 | add_to_free_ctxlist(ctx_old); | ||
559 | } | ||
560 | |||
561 | |||
562 | static void srmmu_switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, | ||
563 | struct task_struct *tsk, int cpu) | ||
564 | { | ||
565 | if(mm->context == NO_CONTEXT) { | ||
566 | spin_lock(&srmmu_context_spinlock); | ||
567 | alloc_context(old_mm, mm); | ||
568 | spin_unlock(&srmmu_context_spinlock); | ||
569 | srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd); | ||
570 | } | ||
571 | |||
572 | if (is_hypersparc) | ||
573 | hyper_flush_whole_icache(); | ||
574 | |||
575 | srmmu_set_context(mm->context); | ||
576 | } | ||
577 | |||
578 | /* Low level IO area allocation on the SRMMU. */ | ||
579 | static inline void srmmu_mapioaddr(unsigned long physaddr, | ||
580 | unsigned long virt_addr, int bus_type) | ||
581 | { | ||
582 | pgd_t *pgdp; | ||
583 | pmd_t *pmdp; | ||
584 | pte_t *ptep; | ||
585 | unsigned long tmp; | ||
586 | |||
587 | physaddr &= PAGE_MASK; | ||
588 | pgdp = pgd_offset_k(virt_addr); | ||
589 | pmdp = srmmu_pmd_offset(pgdp, virt_addr); | ||
590 | ptep = srmmu_pte_offset(pmdp, virt_addr); | ||
591 | tmp = (physaddr >> 4) | SRMMU_ET_PTE; | ||
592 | |||
593 | /* | ||
594 | * I need to test whether this is consistent over all | ||
595 | * sun4m's. The bus_type represents the upper 4 bits of | ||
596 | * the 36-bit physical address on the I/O space lines... | ||
597 | */ | ||
598 | tmp |= (bus_type << 28); | ||
599 | tmp |= SRMMU_PRIV; | ||
600 | __flush_page_to_ram(virt_addr); | ||
601 | srmmu_set_pte(ptep, __pte(tmp)); | ||
602 | } | ||
603 | |||
604 | static void srmmu_mapiorange(unsigned int bus, unsigned long xpa, | ||
605 | unsigned long xva, unsigned int len) | ||
606 | { | ||
607 | while (len != 0) { | ||
608 | len -= PAGE_SIZE; | ||
609 | srmmu_mapioaddr(xpa, xva, bus); | ||
610 | xva += PAGE_SIZE; | ||
611 | xpa += PAGE_SIZE; | ||
612 | } | ||
613 | flush_tlb_all(); | ||
614 | } | ||
615 | |||
616 | static inline void srmmu_unmapioaddr(unsigned long virt_addr) | ||
617 | { | ||
618 | pgd_t *pgdp; | ||
619 | pmd_t *pmdp; | ||
620 | pte_t *ptep; | ||
621 | |||
622 | pgdp = pgd_offset_k(virt_addr); | ||
623 | pmdp = srmmu_pmd_offset(pgdp, virt_addr); | ||
624 | ptep = srmmu_pte_offset(pmdp, virt_addr); | ||
625 | |||
626 | /* No need to flush uncacheable page. */ | ||
627 | srmmu_pte_clear(ptep); | ||
628 | } | ||
629 | |||
630 | static void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len) | ||
631 | { | ||
632 | while (len != 0) { | ||
633 | len -= PAGE_SIZE; | ||
634 | srmmu_unmapioaddr(virt_addr); | ||
635 | virt_addr += PAGE_SIZE; | ||
636 | } | ||
637 | flush_tlb_all(); | ||
638 | } | ||
639 | |||
640 | /* | ||
641 | * On the SRMMU we do not have the problems with limited tlb entries | ||
642 | * for mapping kernel pages, so we just take things from the free page | ||
643 | * pool. As a side effect we are putting a little too much pressure | ||
644 | * on the gfp() subsystem. This setup also makes the logic of the | ||
645 | * iommu mapping code a lot easier as we can transparently handle | ||
646 | * mappings on the kernel stack without any of the special code | ||
647 | * that was needed on the sun4c. | ||
648 | */ | ||
649 | struct thread_info *srmmu_alloc_thread_info(void) | ||
650 | { | ||
651 | struct thread_info *ret; | ||
652 | |||
653 | ret = (struct thread_info *)__get_free_pages(GFP_KERNEL, | ||
654 | THREAD_INFO_ORDER); | ||
655 | #ifdef CONFIG_DEBUG_STACK_USAGE | ||
656 | if (ret) | ||
657 | memset(ret, 0, PAGE_SIZE << THREAD_INFO_ORDER); | ||
658 | #endif /* DEBUG_STACK_USAGE */ | ||
659 | |||
660 | return ret; | ||
661 | } | ||
662 | |||
663 | static void srmmu_free_thread_info(struct thread_info *ti) | ||
664 | { | ||
665 | free_pages((unsigned long)ti, THREAD_INFO_ORDER); | ||
666 | } | ||
667 | |||
668 | /* tsunami.S */ | ||
669 | extern void tsunami_flush_cache_all(void); | ||
670 | extern void tsunami_flush_cache_mm(struct mm_struct *mm); | ||
671 | extern void tsunami_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); | ||
672 | extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page); | ||
673 | extern void tsunami_flush_page_to_ram(unsigned long page); | ||
674 | extern void tsunami_flush_page_for_dma(unsigned long page); | ||
675 | extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr); | ||
676 | extern void tsunami_flush_tlb_all(void); | ||
677 | extern void tsunami_flush_tlb_mm(struct mm_struct *mm); | ||
678 | extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); | ||
679 | extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page); | ||
680 | extern void tsunami_setup_blockops(void); | ||
681 | |||
682 | /* | ||
683 | * Workaround, until we find what's going on with Swift. When low on memory, | ||
684 | * it sometimes loops in fault/handle_mm_fault (incl. flush_tlb_page), finds | ||
685 | * the entry already in the page tables, and faults again on the same instruction. | ||
686 | * I really don't understand it, have checked it and contexts | ||
687 | * are right, flush_tlb_all is done as well, and it faults again... | ||
688 | * Strange. -jj | ||
689 | * | ||
690 | * The following code is a deadwood that may be necessary when | ||
691 | * we start to make precise page flushes again. --zaitcev | ||
692 | */ | ||
693 | static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte) | ||
694 | { | ||
695 | #if 0 | ||
696 | static unsigned long last; | ||
697 | unsigned int val; | ||
698 | /* unsigned int n; */ | ||
699 | |||
700 | if (address == last) { | ||
701 | val = srmmu_hwprobe(address); | ||
702 | if (val != 0 && pte_val(pte) != val) { | ||
703 | printk("swift_update_mmu_cache: " | ||
704 | "addr %lx put %08x probed %08x from %p\n", | ||
705 | address, pte_val(pte), val, | ||
706 | __builtin_return_address(0)); | ||
707 | srmmu_flush_whole_tlb(); | ||
708 | } | ||
709 | } | ||
710 | last = address; | ||
711 | #endif | ||
712 | } | ||
713 | |||
714 | /* swift.S */ | ||
715 | extern void swift_flush_cache_all(void); | ||
716 | extern void swift_flush_cache_mm(struct mm_struct *mm); | ||
717 | extern void swift_flush_cache_range(struct vm_area_struct *vma, | ||
718 | unsigned long start, unsigned long end); | ||
719 | extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page); | ||
720 | extern void swift_flush_page_to_ram(unsigned long page); | ||
721 | extern void swift_flush_page_for_dma(unsigned long page); | ||
722 | extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr); | ||
723 | extern void swift_flush_tlb_all(void); | ||
724 | extern void swift_flush_tlb_mm(struct mm_struct *mm); | ||
725 | extern void swift_flush_tlb_range(struct vm_area_struct *vma, | ||
726 | unsigned long start, unsigned long end); | ||
727 | extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page); | ||
728 | |||
729 | #if 0 /* P3: deadwood to debug precise flushes on Swift. */ | ||
730 | void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) | ||
731 | { | ||
732 | int cctx, ctx1; | ||
733 | |||
734 | page &= PAGE_MASK; | ||
735 | if ((ctx1 = vma->vm_mm->context) != -1) { | ||
736 | cctx = srmmu_get_context(); | ||
737 | /* Is context # ever different from current context? P3 */ | ||
738 | if (cctx != ctx1) { | ||
739 | printk("flush ctx %02x curr %02x\n", ctx1, cctx); | ||
740 | srmmu_set_context(ctx1); | ||
741 | swift_flush_page(page); | ||
742 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : | ||
743 | "r" (page), "i" (ASI_M_FLUSH_PROBE)); | ||
744 | srmmu_set_context(cctx); | ||
745 | } else { | ||
746 | /* Remove protection bits from the virtual cache. */ | ||
747 | /* swift_flush_cache_all(); */ | ||
748 | /* swift_flush_cache_page(vma, page); */ | ||
749 | swift_flush_page(page); | ||
750 | |||
751 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : | ||
752 | "r" (page), "i" (ASI_M_FLUSH_PROBE)); | ||
753 | /* same as above: srmmu_flush_tlb_page() */ | ||
754 | } | ||
755 | } | ||
756 | } | ||
757 | #endif | ||
758 | |||
759 | /* | ||
760 | * The following are all MBUS based SRMMU modules, and therefore could | ||
761 | * be found in a multiprocessor configuration. On the whole, these | ||
762 | * chips seem to be much more touchy about DVMA and page tables | ||
763 | * with respect to cache coherency. | ||
764 | */ | ||
765 | |||
766 | /* Cypress flushes. */ | ||
767 | static void cypress_flush_cache_all(void) | ||
768 | { | ||
769 | volatile unsigned long cypress_sucks; | ||
770 | unsigned long faddr, tagval; | ||
771 | |||
772 | flush_user_windows(); | ||
773 | for(faddr = 0; faddr < 0x10000; faddr += 0x20) { | ||
774 | __asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" : | ||
775 | "=r" (tagval) : | ||
776 | "r" (faddr), "r" (0x40000), | ||
777 | "i" (ASI_M_DATAC_TAG)); | ||
778 | |||
779 | /* If modified and valid, kick it. */ | ||
780 | if((tagval & 0x60) == 0x60) | ||
781 | cypress_sucks = *(unsigned long *)(0xf0020000 + faddr); | ||
782 | } | ||
783 | } | ||
784 | |||
785 | static void cypress_flush_cache_mm(struct mm_struct *mm) | ||
786 | { | ||
787 | register unsigned long a, b, c, d, e, f, g; | ||
788 | unsigned long flags, faddr; | ||
789 | int octx; | ||
790 | |||
791 | FLUSH_BEGIN(mm) | ||
792 | flush_user_windows(); | ||
793 | local_irq_save(flags); | ||
794 | octx = srmmu_get_context(); | ||
795 | srmmu_set_context(mm->context); | ||
796 | a = 0x20; b = 0x40; c = 0x60; | ||
797 | d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0; | ||
798 | |||
799 | faddr = (0x10000 - 0x100); | ||
800 | goto inside; | ||
801 | do { | ||
802 | faddr -= 0x100; | ||
803 | inside: | ||
804 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t" | ||
805 | "sta %%g0, [%0 + %2] %1\n\t" | ||
806 | "sta %%g0, [%0 + %3] %1\n\t" | ||
807 | "sta %%g0, [%0 + %4] %1\n\t" | ||
808 | "sta %%g0, [%0 + %5] %1\n\t" | ||
809 | "sta %%g0, [%0 + %6] %1\n\t" | ||
810 | "sta %%g0, [%0 + %7] %1\n\t" | ||
811 | "sta %%g0, [%0 + %8] %1\n\t" : : | ||
812 | "r" (faddr), "i" (ASI_M_FLUSH_CTX), | ||
813 | "r" (a), "r" (b), "r" (c), "r" (d), | ||
814 | "r" (e), "r" (f), "r" (g)); | ||
815 | } while(faddr); | ||
816 | srmmu_set_context(octx); | ||
817 | local_irq_restore(flags); | ||
818 | FLUSH_END | ||
819 | } | ||
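Each sta to the flush ASI in the unrolled loop above (and in the similar loops below) flushes one 32-byte Cypress line; with the base address plus the offsets 0x20 through 0xe0, every pass through the asm covers 0x100 bytes, so sweeping the 64 KB (0x10000) of context-tagged cache takes 256 iterations as faddr counts down to zero.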
820 | |||
821 | static void cypress_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) | ||
822 | { | ||
823 | struct mm_struct *mm = vma->vm_mm; | ||
824 | register unsigned long a, b, c, d, e, f, g; | ||
825 | unsigned long flags, faddr; | ||
826 | int octx; | ||
827 | |||
828 | FLUSH_BEGIN(mm) | ||
829 | flush_user_windows(); | ||
830 | local_irq_save(flags); | ||
831 | octx = srmmu_get_context(); | ||
832 | srmmu_set_context(mm->context); | ||
833 | a = 0x20; b = 0x40; c = 0x60; | ||
834 | d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0; | ||
835 | |||
836 | start &= SRMMU_REAL_PMD_MASK; | ||
837 | while(start < end) { | ||
838 | faddr = (start + (0x10000 - 0x100)); | ||
839 | goto inside; | ||
840 | do { | ||
841 | faddr -= 0x100; | ||
842 | inside: | ||
843 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t" | ||
844 | "sta %%g0, [%0 + %2] %1\n\t" | ||
845 | "sta %%g0, [%0 + %3] %1\n\t" | ||
846 | "sta %%g0, [%0 + %4] %1\n\t" | ||
847 | "sta %%g0, [%0 + %5] %1\n\t" | ||
848 | "sta %%g0, [%0 + %6] %1\n\t" | ||
849 | "sta %%g0, [%0 + %7] %1\n\t" | ||
850 | "sta %%g0, [%0 + %8] %1\n\t" : : | ||
851 | "r" (faddr), | ||
852 | "i" (ASI_M_FLUSH_SEG), | ||
853 | "r" (a), "r" (b), "r" (c), "r" (d), | ||
854 | "r" (e), "r" (f), "r" (g)); | ||
855 | } while (faddr != start); | ||
856 | start += SRMMU_REAL_PMD_SIZE; | ||
857 | } | ||
858 | srmmu_set_context(octx); | ||
859 | local_irq_restore(flags); | ||
860 | FLUSH_END | ||
861 | } | ||
862 | |||
863 | static void cypress_flush_cache_page(struct vm_area_struct *vma, unsigned long page) | ||
864 | { | ||
865 | register unsigned long a, b, c, d, e, f, g; | ||
866 | struct mm_struct *mm = vma->vm_mm; | ||
867 | unsigned long flags, line; | ||
868 | int octx; | ||
869 | |||
870 | FLUSH_BEGIN(mm) | ||
871 | flush_user_windows(); | ||
872 | local_irq_save(flags); | ||
873 | octx = srmmu_get_context(); | ||
874 | srmmu_set_context(mm->context); | ||
875 | a = 0x20; b = 0x40; c = 0x60; | ||
876 | d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0; | ||
877 | |||
878 | page &= PAGE_MASK; | ||
879 | line = (page + PAGE_SIZE) - 0x100; | ||
880 | goto inside; | ||
881 | do { | ||
882 | line -= 0x100; | ||
883 | inside: | ||
884 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t" | ||
885 | "sta %%g0, [%0 + %2] %1\n\t" | ||
886 | "sta %%g0, [%0 + %3] %1\n\t" | ||
887 | "sta %%g0, [%0 + %4] %1\n\t" | ||
888 | "sta %%g0, [%0 + %5] %1\n\t" | ||
889 | "sta %%g0, [%0 + %6] %1\n\t" | ||
890 | "sta %%g0, [%0 + %7] %1\n\t" | ||
891 | "sta %%g0, [%0 + %8] %1\n\t" : : | ||
892 | "r" (line), | ||
893 | "i" (ASI_M_FLUSH_PAGE), | ||
894 | "r" (a), "r" (b), "r" (c), "r" (d), | ||
895 | "r" (e), "r" (f), "r" (g)); | ||
896 | } while(line != page); | ||
897 | srmmu_set_context(octx); | ||
898 | local_irq_restore(flags); | ||
899 | FLUSH_END | ||
900 | } | ||
901 | |||
902 | /* Cypress is copy-back, at least that is how we configure it. */ | ||
903 | static void cypress_flush_page_to_ram(unsigned long page) | ||
904 | { | ||
905 | register unsigned long a, b, c, d, e, f, g; | ||
906 | unsigned long line; | ||
907 | |||
908 | a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0; | ||
909 | page &= PAGE_MASK; | ||
910 | line = (page + PAGE_SIZE) - 0x100; | ||
911 | goto inside; | ||
912 | do { | ||
913 | line -= 0x100; | ||
914 | inside: | ||
915 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t" | ||
916 | "sta %%g0, [%0 + %2] %1\n\t" | ||
917 | "sta %%g0, [%0 + %3] %1\n\t" | ||
918 | "sta %%g0, [%0 + %4] %1\n\t" | ||
919 | "sta %%g0, [%0 + %5] %1\n\t" | ||
920 | "sta %%g0, [%0 + %6] %1\n\t" | ||
921 | "sta %%g0, [%0 + %7] %1\n\t" | ||
922 | "sta %%g0, [%0 + %8] %1\n\t" : : | ||
923 | "r" (line), | ||
924 | "i" (ASI_M_FLUSH_PAGE), | ||
925 | "r" (a), "r" (b), "r" (c), "r" (d), | ||
926 | "r" (e), "r" (f), "r" (g)); | ||
927 | } while(line != page); | ||
928 | } | ||
929 | |||
930 | /* Cypress is also IO cache coherent. */ | ||
931 | static void cypress_flush_page_for_dma(unsigned long page) | ||
932 | { | ||
933 | } | ||
934 | |||
935 | /* Cypress has a unified L2 VIPT cache, in which both instructions and | ||
936 | * data are stored. It does not have an onboard icache of any sort, so | ||
937 | * no flush is necessary. | ||
938 | */ | ||
939 | static void cypress_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr) | ||
940 | { | ||
941 | } | ||
942 | |||
943 | static void cypress_flush_tlb_all(void) | ||
944 | { | ||
945 | srmmu_flush_whole_tlb(); | ||
946 | } | ||
947 | |||
948 | static void cypress_flush_tlb_mm(struct mm_struct *mm) | ||
949 | { | ||
950 | FLUSH_BEGIN(mm) | ||
951 | __asm__ __volatile__( | ||
952 | "lda [%0] %3, %%g5\n\t" | ||
953 | "sta %2, [%0] %3\n\t" | ||
954 | "sta %%g0, [%1] %4\n\t" | ||
955 | "sta %%g5, [%0] %3\n" | ||
956 | : /* no outputs */ | ||
957 | : "r" (SRMMU_CTX_REG), "r" (0x300), "r" (mm->context), | ||
958 | "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE) | ||
959 | : "g5"); | ||
960 | FLUSH_END | ||
961 | } | ||
962 | |||
963 | static void cypress_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) | ||
964 | { | ||
965 | struct mm_struct *mm = vma->vm_mm; | ||
966 | unsigned long size; | ||
967 | |||
968 | FLUSH_BEGIN(mm) | ||
969 | start &= SRMMU_PGDIR_MASK; | ||
970 | size = SRMMU_PGDIR_ALIGN(end) - start; | ||
971 | __asm__ __volatile__( | ||
972 | "lda [%0] %5, %%g5\n\t" | ||
973 | "sta %1, [%0] %5\n" | ||
974 | "1:\n\t" | ||
975 | "subcc %3, %4, %3\n\t" | ||
976 | "bne 1b\n\t" | ||
977 | " sta %%g0, [%2 + %3] %6\n\t" | ||
978 | "sta %%g5, [%0] %5\n" | ||
979 | : /* no outputs */ | ||
980 | : "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (start | 0x200), | ||
981 | "r" (size), "r" (SRMMU_PGDIR_SIZE), "i" (ASI_M_MMUREGS), | ||
982 | "i" (ASI_M_FLUSH_PROBE) | ||
983 | : "g5", "cc"); | ||
984 | FLUSH_END | ||
985 | } | ||
986 | |||
987 | static void cypress_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) | ||
988 | { | ||
989 | struct mm_struct *mm = vma->vm_mm; | ||
990 | |||
991 | FLUSH_BEGIN(mm) | ||
992 | __asm__ __volatile__( | ||
993 | "lda [%0] %3, %%g5\n\t" | ||
994 | "sta %1, [%0] %3\n\t" | ||
995 | "sta %%g0, [%2] %4\n\t" | ||
996 | "sta %%g5, [%0] %3\n" | ||
997 | : /* no outputs */ | ||
998 | : "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (page & PAGE_MASK), | ||
999 | "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE) | ||
1000 | : "g5"); | ||
1001 | FLUSH_END | ||
1002 | } | ||
1003 | |||
1004 | /* viking.S */ | ||
1005 | extern void viking_flush_cache_all(void); | ||
1006 | extern void viking_flush_cache_mm(struct mm_struct *mm); | ||
1007 | extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start, | ||
1008 | unsigned long end); | ||
1009 | extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page); | ||
1010 | extern void viking_flush_page_to_ram(unsigned long page); | ||
1011 | extern void viking_flush_page_for_dma(unsigned long page); | ||
1012 | extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr); | ||
1013 | extern void viking_flush_page(unsigned long page); | ||
1014 | extern void viking_mxcc_flush_page(unsigned long page); | ||
1015 | extern void viking_flush_tlb_all(void); | ||
1016 | extern void viking_flush_tlb_mm(struct mm_struct *mm); | ||
1017 | extern void viking_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | ||
1018 | unsigned long end); | ||
1019 | extern void viking_flush_tlb_page(struct vm_area_struct *vma, | ||
1020 | unsigned long page); | ||
1021 | extern void sun4dsmp_flush_tlb_all(void); | ||
1022 | extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm); | ||
1023 | extern void sun4dsmp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | ||
1024 | unsigned long end); | ||
1025 | extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma, | ||
1026 | unsigned long page); | ||
1027 | |||
1028 | /* hypersparc.S */ | ||
1029 | extern void hypersparc_flush_cache_all(void); | ||
1030 | extern void hypersparc_flush_cache_mm(struct mm_struct *mm); | ||
1031 | extern void hypersparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); | ||
1032 | extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page); | ||
1033 | extern void hypersparc_flush_page_to_ram(unsigned long page); | ||
1034 | extern void hypersparc_flush_page_for_dma(unsigned long page); | ||
1035 | extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr); | ||
1036 | extern void hypersparc_flush_tlb_all(void); | ||
1037 | extern void hypersparc_flush_tlb_mm(struct mm_struct *mm); | ||
1038 | extern void hypersparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); | ||
1039 | extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page); | ||
1040 | extern void hypersparc_setup_blockops(void); | ||
1041 | |||
1042 | /* | ||
1043 | * NOTE: All of this startup code assumes the low 16mb (approx.) of | ||
1044 | * kernel mappings are done with a single contiguous chunk of | ||
1045 | * RAM. On small-RAM machines (classics mainly) we only get | ||
1046 | * around 8mb mapped for us. | ||
1047 | */ | ||
1048 | |||
1049 | void __init early_pgtable_allocfail(char *type) | ||
1050 | { | ||
1051 | prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type); | ||
1052 | prom_halt(); | ||
1053 | } | ||
1054 | |||
1055 | void __init srmmu_early_allocate_ptable_skeleton(unsigned long start, unsigned long end) | ||
1056 | { | ||
1057 | pgd_t *pgdp; | ||
1058 | pmd_t *pmdp; | ||
1059 | pte_t *ptep; | ||
1060 | |||
1061 | while(start < end) { | ||
1062 | pgdp = pgd_offset_k(start); | ||
1063 | if(srmmu_pgd_none(*(pgd_t *)__nocache_fix(pgdp))) { | ||
1064 | pmdp = (pmd_t *) __srmmu_get_nocache( | ||
1065 | SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE); | ||
1066 | if (pmdp == NULL) | ||
1067 | early_pgtable_allocfail("pmd"); | ||
1068 | memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE); | ||
1069 | srmmu_pgd_set(__nocache_fix(pgdp), pmdp); | ||
1070 | } | ||
1071 | pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start); | ||
1072 | if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) { | ||
1073 | ptep = (pte_t *)__srmmu_get_nocache(PTE_SIZE, PTE_SIZE); | ||
1074 | if (ptep == NULL) | ||
1075 | early_pgtable_allocfail("pte"); | ||
1076 | memset(__nocache_fix(ptep), 0, PTE_SIZE); | ||
1077 | srmmu_pmd_set(__nocache_fix(pmdp), ptep); | ||
1078 | } | ||
1079 | if (start > (0xffffffffUL - PMD_SIZE)) | ||
1080 | break; | ||
1081 | start = (start + PMD_SIZE) & PMD_MASK; | ||
1082 | } | ||
1083 | } | ||
1084 | |||
1085 | void __init srmmu_allocate_ptable_skeleton(unsigned long start, unsigned long end) | ||
1086 | { | ||
1087 | pgd_t *pgdp; | ||
1088 | pmd_t *pmdp; | ||
1089 | pte_t *ptep; | ||
1090 | |||
1091 | while(start < end) { | ||
1092 | pgdp = pgd_offset_k(start); | ||
1093 | if(srmmu_pgd_none(*pgdp)) { | ||
1094 | pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE); | ||
1095 | if (pmdp == NULL) | ||
1096 | early_pgtable_allocfail("pmd"); | ||
1097 | memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE); | ||
1098 | srmmu_pgd_set(pgdp, pmdp); | ||
1099 | } | ||
1100 | pmdp = srmmu_pmd_offset(pgdp, start); | ||
1101 | if(srmmu_pmd_none(*pmdp)) { | ||
1102 | ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE, | ||
1103 | PTE_SIZE); | ||
1104 | if (ptep == NULL) | ||
1105 | early_pgtable_allocfail("pte"); | ||
1106 | memset(ptep, 0, PTE_SIZE); | ||
1107 | srmmu_pmd_set(pmdp, ptep); | ||
1108 | } | ||
1109 | if (start > (0xffffffffUL - PMD_SIZE)) | ||
1110 | break; | ||
1111 | start = (start + PMD_SIZE) & PMD_MASK; | ||
1112 | } | ||
1113 | } | ||
1114 | |||
1115 | /* | ||
1116 | * This is much cleaner than poking around physical address space | ||
1117 | * looking at the prom's page table directly, which is what most | ||
1118 | * other OSes do. Yuck... this is much better. | ||
1119 | */ | ||
1120 | void __init srmmu_inherit_prom_mappings(unsigned long start,unsigned long end) | ||
1121 | { | ||
1122 | pgd_t *pgdp; | ||
1123 | pmd_t *pmdp; | ||
1124 | pte_t *ptep; | ||
1125 | int what = 0; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */ | ||
1126 | unsigned long prompte; | ||
1127 | |||
1128 | while(start <= end) { | ||
1129 | if (start == 0) | ||
1130 | break; /* probably wrap around */ | ||
1131 | if(start == 0xfef00000) | ||
1132 | start = KADB_DEBUGGER_BEGVM; | ||
1133 | if(!(prompte = srmmu_hwprobe(start))) { | ||
1134 | start += PAGE_SIZE; | ||
1135 | continue; | ||
1136 | } | ||
1137 | |||
1138 | /* A red snapper, see what it really is. */ | ||
1139 | what = 0; | ||
1140 | |||
1141 | if(!(start & ~(SRMMU_REAL_PMD_MASK))) { | ||
1142 | if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_REAL_PMD_SIZE) == prompte) | ||
1143 | what = 1; | ||
1144 | } | ||
1145 | |||
1146 | if(!(start & ~(SRMMU_PGDIR_MASK))) { | ||
1147 | if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) == | ||
1148 | prompte) | ||
1149 | what = 2; | ||
1150 | } | ||
1151 | |||
1152 | pgdp = pgd_offset_k(start); | ||
1153 | if(what == 2) { | ||
1154 | *(pgd_t *)__nocache_fix(pgdp) = __pgd(prompte); | ||
1155 | start += SRMMU_PGDIR_SIZE; | ||
1156 | continue; | ||
1157 | } | ||
1158 | if(srmmu_pgd_none(*(pgd_t *)__nocache_fix(pgdp))) { | ||
1159 | pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE); | ||
1160 | if (pmdp == NULL) | ||
1161 | early_pgtable_allocfail("pmd"); | ||
1162 | memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE); | ||
1163 | srmmu_pgd_set(__nocache_fix(pgdp), pmdp); | ||
1164 | } | ||
1165 | pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start); | ||
1166 | if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) { | ||
1167 | ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE, | ||
1168 | PTE_SIZE); | ||
1169 | if (ptep == NULL) | ||
1170 | early_pgtable_allocfail("pte"); | ||
1171 | memset(__nocache_fix(ptep), 0, PTE_SIZE); | ||
1172 | srmmu_pmd_set(__nocache_fix(pmdp), ptep); | ||
1173 | } | ||
1174 | if(what == 1) { | ||
1175 | /* | ||
1176 | * We bend the rule where all 16 PTPs in a pmd_t point | ||
1177 | * inside the same PTE page, and we leak a perfectly | ||
1178 | * good hardware PTE piece. Alternatives seem worse. | ||
1179 | */ | ||
1180 | unsigned int x; /* Index of HW PMD in soft cluster */ | ||
1181 | x = (start >> PMD_SHIFT) & 15; | ||
1182 | *(unsigned long *)__nocache_fix(&pmdp->pmdv[x]) = prompte; | ||
1183 | start += SRMMU_REAL_PMD_SIZE; | ||
1184 | continue; | ||
1185 | } | ||
1186 | ptep = srmmu_pte_offset(__nocache_fix(pmdp), start); | ||
1187 | *(pte_t *)__nocache_fix(ptep) = __pte(prompte); | ||
1188 | start += PAGE_SIZE; | ||
1189 | } | ||
1190 | } | ||
1191 | |||
1192 | #define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID) | ||
1193 | |||
1194 | /* Create a third-level SRMMU 16MB page mapping. */ | ||
1195 | static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base) | ||
1196 | { | ||
1197 | pgd_t *pgdp = pgd_offset_k(vaddr); | ||
1198 | unsigned long big_pte; | ||
1199 | |||
1200 | big_pte = KERNEL_PTE(phys_base >> 4); | ||
1201 | *(pgd_t *)__nocache_fix(pgdp) = __pgd(big_pte); | ||
1202 | } | ||
1203 | |||
1204 | /* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */ | ||
1205 | static unsigned long __init map_spbank(unsigned long vbase, int sp_entry) | ||
1206 | { | ||
1207 | unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK); | ||
1208 | unsigned long vstart = (vbase & SRMMU_PGDIR_MASK); | ||
1209 | unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes); | ||
1210 | /* Map "low" memory only */ | ||
1211 | const unsigned long min_vaddr = PAGE_OFFSET; | ||
1212 | const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM; | ||
1213 | |||
1214 | if (vstart < min_vaddr || vstart >= max_vaddr) | ||
1215 | return vstart; | ||
1216 | |||
1217 | if (vend > max_vaddr || vend < min_vaddr) | ||
1218 | vend = max_vaddr; | ||
1219 | |||
1220 | while(vstart < vend) { | ||
1221 | do_large_mapping(vstart, pstart); | ||
1222 | vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE; | ||
1223 | } | ||
1224 | return vstart; | ||
1225 | } | ||
1226 | |||
1227 | static inline void memprobe_error(char *msg) | ||
1228 | { | ||
1229 | prom_printf(msg); | ||
1230 | prom_printf("Halting now...\n"); | ||
1231 | prom_halt(); | ||
1232 | } | ||
1233 | |||
1234 | static inline void map_kernel(void) | ||
1235 | { | ||
1236 | int i; | ||
1237 | |||
1238 | if (phys_base > 0) { | ||
1239 | do_large_mapping(PAGE_OFFSET, phys_base); | ||
1240 | } | ||
1241 | |||
1242 | for (i = 0; sp_banks[i].num_bytes != 0; i++) { | ||
1243 | map_spbank((unsigned long)__va(sp_banks[i].base_addr), i); | ||
1244 | } | ||
1245 | |||
1246 | BTFIXUPSET_SIMM13(user_ptrs_per_pgd, PAGE_OFFSET / SRMMU_PGDIR_SIZE); | ||
1247 | } | ||
1248 | |||
1249 | /* Paging initialization on the Sparc Reference MMU. */ | ||
1250 | extern void sparc_context_init(int); | ||
1251 | |||
1252 | void (*poke_srmmu)(void) __initdata = NULL; | ||
1253 | |||
1254 | extern unsigned long bootmem_init(unsigned long *pages_avail); | ||
1255 | |||
1256 | void __init srmmu_paging_init(void) | ||
1257 | { | ||
1258 | int i, cpunode; | ||
1259 | char node_str[128]; | ||
1260 | pgd_t *pgd; | ||
1261 | pmd_t *pmd; | ||
1262 | pte_t *pte; | ||
1263 | unsigned long pages_avail; | ||
1264 | |||
1265 | sparc_iomap.start = SUN4M_IOBASE_VADDR; /* 16MB of IOSPACE on all sun4m's. */ | ||
1266 | |||
1267 | if (sparc_cpu_model == sun4d) | ||
1268 | num_contexts = 65536; /* We know it is Viking */ | ||
1269 | else { | ||
1270 | /* Find the number of contexts on the srmmu. */ | ||
1271 | cpunode = prom_getchild(prom_root_node); | ||
1272 | num_contexts = 0; | ||
1273 | while(cpunode != 0) { | ||
1274 | prom_getstring(cpunode, "device_type", node_str, sizeof(node_str)); | ||
1275 | if(!strcmp(node_str, "cpu")) { | ||
1276 | num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8); | ||
1277 | break; | ||
1278 | } | ||
1279 | cpunode = prom_getsibling(cpunode); | ||
1280 | } | ||
1281 | } | ||
1282 | |||
1283 | if(!num_contexts) { | ||
1284 | prom_printf("Something wrong, can't find cpu node in paging_init.\n"); | ||
1285 | prom_halt(); | ||
1286 | } | ||
1287 | |||
1288 | pages_avail = 0; | ||
1289 | last_valid_pfn = bootmem_init(&pages_avail); | ||
1290 | |||
1291 | srmmu_nocache_calcsize(); | ||
1292 | srmmu_nocache_init(); | ||
1293 | srmmu_inherit_prom_mappings(0xfe400000,(LINUX_OPPROM_ENDVM-PAGE_SIZE)); | ||
1294 | map_kernel(); | ||
1295 | |||
1296 | /* ctx table has to be physically aligned to its size */ | ||
1297 | srmmu_context_table = (ctxd_t *)__srmmu_get_nocache(num_contexts*sizeof(ctxd_t), num_contexts*sizeof(ctxd_t)); | ||
1298 | srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa((unsigned long)srmmu_context_table); | ||
1299 | |||
1300 | for(i = 0; i < num_contexts; i++) | ||
1301 | srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir); | ||
1302 | |||
1303 | flush_cache_all(); | ||
1304 | srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys); | ||
1305 | flush_tlb_all(); | ||
1306 | poke_srmmu(); | ||
1307 | |||
1308 | #ifdef CONFIG_SUN_IO | ||
1309 | srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END); | ||
1310 | srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END); | ||
1311 | #endif | ||
1312 | |||
1313 | srmmu_allocate_ptable_skeleton( | ||
1314 | __fix_to_virt(__end_of_fixed_addresses - 1), FIXADDR_TOP); | ||
1315 | srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END); | ||
1316 | |||
1317 | pgd = pgd_offset_k(PKMAP_BASE); | ||
1318 | pmd = srmmu_pmd_offset(pgd, PKMAP_BASE); | ||
1319 | pte = srmmu_pte_offset(pmd, PKMAP_BASE); | ||
1320 | pkmap_page_table = pte; | ||
1321 | |||
1322 | flush_cache_all(); | ||
1323 | flush_tlb_all(); | ||
1324 | |||
1325 | sparc_context_init(num_contexts); | ||
1326 | |||
1327 | kmap_init(); | ||
1328 | |||
1329 | { | ||
1330 | unsigned long zones_size[MAX_NR_ZONES]; | ||
1331 | unsigned long zholes_size[MAX_NR_ZONES]; | ||
1332 | unsigned long npages; | ||
1333 | int znum; | ||
1334 | |||
1335 | for (znum = 0; znum < MAX_NR_ZONES; znum++) | ||
1336 | zones_size[znum] = zholes_size[znum] = 0; | ||
1337 | |||
1338 | npages = max_low_pfn - pfn_base; | ||
1339 | |||
1340 | zones_size[ZONE_DMA] = npages; | ||
1341 | zholes_size[ZONE_DMA] = npages - pages_avail; | ||
1342 | |||
1343 | npages = highend_pfn - max_low_pfn; | ||
1344 | zones_size[ZONE_HIGHMEM] = npages; | ||
1345 | zholes_size[ZONE_HIGHMEM] = npages - calc_highpages(); | ||
1346 | |||
1347 | free_area_init_node(0, &contig_page_data, zones_size, | ||
1348 | pfn_base, zholes_size); | ||
1349 | } | ||
1350 | } | ||
1351 | |||
1352 | static void srmmu_mmu_info(struct seq_file *m) | ||
1353 | { | ||
1354 | seq_printf(m, | ||
1355 | "MMU type\t: %s\n" | ||
1356 | "contexts\t: %d\n" | ||
1357 | "nocache total\t: %ld\n" | ||
1358 | "nocache used\t: %d\n", | ||
1359 | srmmu_name, | ||
1360 | num_contexts, | ||
1361 | srmmu_nocache_size, | ||
1362 | srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT); | ||
1363 | } | ||
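srmmu_mmu_info() supplies the MMU portion of /proc/cpuinfo. On a hypothetical Viking/MXCC machine the added lines would look roughly like this (the values are illustrative):

MMU type	: TI Viking/MXCC
contexts	: 65536
nocache total	: 1048576
nocache used	: 24576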
1364 | |||
1365 | static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte) | ||
1366 | { | ||
1367 | } | ||
1368 | |||
1369 | static void srmmu_destroy_context(struct mm_struct *mm) | ||
1370 | { | ||
1371 | |||
1372 | if(mm->context != NO_CONTEXT) { | ||
1373 | flush_cache_mm(mm); | ||
1374 | srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir); | ||
1375 | flush_tlb_mm(mm); | ||
1376 | spin_lock(&srmmu_context_spinlock); | ||
1377 | free_context(mm->context); | ||
1378 | spin_unlock(&srmmu_context_spinlock); | ||
1379 | mm->context = NO_CONTEXT; | ||
1380 | } | ||
1381 | } | ||
1382 | |||
1383 | /* Init various srmmu chip types. */ | ||
1384 | static void __init srmmu_is_bad(void) | ||
1385 | { | ||
1386 | prom_printf("Could not determine SRMMU chip type.\n"); | ||
1387 | prom_halt(); | ||
1388 | } | ||
1389 | |||
1390 | static void __init init_vac_layout(void) | ||
1391 | { | ||
1392 | int nd, cache_lines; | ||
1393 | char node_str[128]; | ||
1394 | #ifdef CONFIG_SMP | ||
1395 | int cpu = 0; | ||
1396 | unsigned long max_size = 0; | ||
1397 | unsigned long min_line_size = 0x10000000; | ||
1398 | #endif | ||
1399 | |||
1400 | nd = prom_getchild(prom_root_node); | ||
1401 | while((nd = prom_getsibling(nd)) != 0) { | ||
1402 | prom_getstring(nd, "device_type", node_str, sizeof(node_str)); | ||
1403 | if(!strcmp(node_str, "cpu")) { | ||
1404 | vac_line_size = prom_getint(nd, "cache-line-size"); | ||
1405 | if (vac_line_size == -1) { | ||
1406 | prom_printf("can't determine cache-line-size, " | ||
1407 | "halting.\n"); | ||
1408 | prom_halt(); | ||
1409 | } | ||
1410 | cache_lines = prom_getint(nd, "cache-nlines"); | ||
1411 | if (cache_lines == -1) { | ||
1412 | prom_printf("can't determine cache-nlines, halting.\n"); | ||
1413 | prom_halt(); | ||
1414 | } | ||
1415 | |||
1416 | vac_cache_size = cache_lines * vac_line_size; | ||
1417 | #ifdef CONFIG_SMP | ||
1418 | if(vac_cache_size > max_size) | ||
1419 | max_size = vac_cache_size; | ||
1420 | if(vac_line_size < min_line_size) | ||
1421 | min_line_size = vac_line_size; | ||
1422 | cpu++; | ||
1423 | if (cpu >= NR_CPUS || !cpu_online(cpu)) | ||
1424 | break; | ||
1425 | #else | ||
1426 | break; | ||
1427 | #endif | ||
1428 | } | ||
1429 | } | ||
1430 | if(nd == 0) { | ||
1431 | prom_printf("No CPU nodes found, halting.\n"); | ||
1432 | prom_halt(); | ||
1433 | } | ||
1434 | #ifdef CONFIG_SMP | ||
1435 | vac_cache_size = max_size; | ||
1436 | vac_line_size = min_line_size; | ||
1437 | #endif | ||
1438 | printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n", | ||
1439 | (int)vac_cache_size, (int)vac_line_size); | ||
1440 | } | ||
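On SMP the loop above reduces the per-CPU cache properties conservatively: keep the largest cache so a full flush covers every CPU, and the smallest line size so the flush loop never skips a line on any CPU. A minimal sketch of the same reduction with two hypothetical modules:

#include <stdio.h>

int main(void)
{
	/* Hypothetical per-CPU values: CPU0 256KB/32B, CPU1 128KB/16B. */
	unsigned long sizes[] = { 256 * 1024, 128 * 1024 };
	unsigned long lines[] = { 32, 16 };
	unsigned long max_size = 0, min_line = ~0UL;
	int i;

	for (i = 0; i < 2; i++) {
		if (sizes[i] > max_size)
			max_size = sizes[i];
		if (lines[i] < min_line)
			min_line = lines[i];
	}
	/* Prints: VAC 262144 bytes, line 16 bytes */
	printf("VAC %lu bytes, line %lu bytes\n", max_size, min_line);
	return 0;
}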
1441 | |||
1442 | static void __init poke_hypersparc(void) | ||
1443 | { | ||
1444 | volatile unsigned long clear; | ||
1445 | unsigned long mreg = srmmu_get_mmureg(); | ||
1446 | |||
1447 | hyper_flush_unconditional_combined(); | ||
1448 | |||
1449 | mreg &= ~(HYPERSPARC_CWENABLE); | ||
1450 | mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE); | ||
1451 | mreg |= (HYPERSPARC_CMODE); | ||
1452 | |||
1453 | srmmu_set_mmureg(mreg); | ||
1454 | |||
1455 | #if 0 /* XXX I think this is bad news... -DaveM */ | ||
1456 | hyper_clear_all_tags(); | ||
1457 | #endif | ||
1458 | |||
1459 | put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE); | ||
1460 | hyper_flush_whole_icache(); | ||
1461 | clear = srmmu_get_faddr(); | ||
1462 | clear = srmmu_get_fstatus(); | ||
1463 | } | ||
1464 | |||
1465 | static void __init init_hypersparc(void) | ||
1466 | { | ||
1467 | srmmu_name = "ROSS HyperSparc"; | ||
1468 | srmmu_modtype = HyperSparc; | ||
1469 | |||
1470 | init_vac_layout(); | ||
1471 | |||
1472 | is_hypersparc = 1; | ||
1473 | |||
1474 | BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM); | ||
1475 | BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM); | ||
1476 | BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM); | ||
1477 | BTFIXUPSET_CALL(flush_cache_all, hypersparc_flush_cache_all, BTFIXUPCALL_NORM); | ||
1478 | BTFIXUPSET_CALL(flush_cache_mm, hypersparc_flush_cache_mm, BTFIXUPCALL_NORM); | ||
1479 | BTFIXUPSET_CALL(flush_cache_range, hypersparc_flush_cache_range, BTFIXUPCALL_NORM); | ||
1480 | BTFIXUPSET_CALL(flush_cache_page, hypersparc_flush_cache_page, BTFIXUPCALL_NORM); | ||
1481 | |||
1482 | BTFIXUPSET_CALL(flush_tlb_all, hypersparc_flush_tlb_all, BTFIXUPCALL_NORM); | ||
1483 | BTFIXUPSET_CALL(flush_tlb_mm, hypersparc_flush_tlb_mm, BTFIXUPCALL_NORM); | ||
1484 | BTFIXUPSET_CALL(flush_tlb_range, hypersparc_flush_tlb_range, BTFIXUPCALL_NORM); | ||
1485 | BTFIXUPSET_CALL(flush_tlb_page, hypersparc_flush_tlb_page, BTFIXUPCALL_NORM); | ||
1486 | |||
1487 | BTFIXUPSET_CALL(__flush_page_to_ram, hypersparc_flush_page_to_ram, BTFIXUPCALL_NORM); | ||
1488 | BTFIXUPSET_CALL(flush_sig_insns, hypersparc_flush_sig_insns, BTFIXUPCALL_NORM); | ||
1489 | BTFIXUPSET_CALL(flush_page_for_dma, hypersparc_flush_page_for_dma, BTFIXUPCALL_NOP); | ||
1490 | |||
1491 | |||
1492 | poke_srmmu = poke_hypersparc; | ||
1493 | |||
1494 | hypersparc_setup_blockops(); | ||
1495 | } | ||
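Each BTFIXUPSET_CALL() above records which per-chip routine a generic MMU operation should resolve to; per the btfixup.c header, the boot-time fixup pass then rewrites the call sites in place so the single sun4c/srmmu kernel image needs no indirect calls. The sketch below shows the general idea only; the real record layout and macros live in asm/btfixup.h and differ in detail:

/* Illustrative only: one fixup record per patched call site. */
struct fixup_sketch {
	unsigned int *insn;	/* call site in the kernel image */
	void *target;		/* e.g. hypersparc_flush_cache_all */
};

static void apply_one(struct fixup_sketch *f)
{
	/* Synthesize a SPARC "call": op=01 in bits 31:30, signed
	 * word displacement in the low 30 bits. */
	long disp = ((char *)f->target - (char *)f->insn) >> 2;

	*f->insn = 0x40000000u | ((unsigned int)disp & 0x3fffffffu);
}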
1496 | |||
1497 | static void __init poke_cypress(void) | ||
1498 | { | ||
1499 | unsigned long mreg = srmmu_get_mmureg(); | ||
1500 | unsigned long faddr, tagval; | ||
1501 | volatile unsigned long cypress_sucks; | ||
1502 | volatile unsigned long clear; | ||
1503 | |||
1504 | clear = srmmu_get_faddr(); | ||
1505 | clear = srmmu_get_fstatus(); | ||
1506 | |||
1507 | if (!(mreg & CYPRESS_CENABLE)) { | ||
1508 | for(faddr = 0x0; faddr < 0x10000; faddr += 20) { | ||
1509 | __asm__ __volatile__("sta %%g0, [%0 + %1] %2\n\t" | ||
1510 | "sta %%g0, [%0] %2\n\t" : : | ||
1511 | "r" (faddr), "r" (0x40000), | ||
1512 | "i" (ASI_M_DATAC_TAG)); | ||
1513 | } | ||
1514 | } else { | ||
1515 | for(faddr = 0; faddr < 0x10000; faddr += 0x20) { | ||
1516 | __asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" : | ||
1517 | "=r" (tagval) : | ||
1518 | "r" (faddr), "r" (0x40000), | ||
1519 | "i" (ASI_M_DATAC_TAG)); | ||
1520 | |||
1521 | /* If modified and valid, kick it. */ | ||
1522 | if((tagval & 0x60) == 0x60) | ||
1523 | cypress_sucks = *(unsigned long *) | ||
1524 | (0xf0020000 + faddr); | ||
1525 | } | ||
1526 | } | ||
1527 | |||
1528 | /* And one more, for our good neighbor, Mr. Broken Cypress. */ | ||
1529 | clear = srmmu_get_faddr(); | ||
1530 | clear = srmmu_get_fstatus(); | ||
1531 | |||
1532 | mreg |= (CYPRESS_CENABLE | CYPRESS_CMODE); | ||
1533 | srmmu_set_mmureg(mreg); | ||
1534 | } | ||
1535 | |||
1536 | static void __init init_cypress_common(void) | ||
1537 | { | ||
1538 | init_vac_layout(); | ||
1539 | |||
1540 | BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM); | ||
1541 | BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM); | ||
1542 | BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM); | ||
1543 | BTFIXUPSET_CALL(flush_cache_all, cypress_flush_cache_all, BTFIXUPCALL_NORM); | ||
1544 | BTFIXUPSET_CALL(flush_cache_mm, cypress_flush_cache_mm, BTFIXUPCALL_NORM); | ||
1545 | BTFIXUPSET_CALL(flush_cache_range, cypress_flush_cache_range, BTFIXUPCALL_NORM); | ||
1546 | BTFIXUPSET_CALL(flush_cache_page, cypress_flush_cache_page, BTFIXUPCALL_NORM); | ||
1547 | |||
1548 | BTFIXUPSET_CALL(flush_tlb_all, cypress_flush_tlb_all, BTFIXUPCALL_NORM); | ||
1549 | BTFIXUPSET_CALL(flush_tlb_mm, cypress_flush_tlb_mm, BTFIXUPCALL_NORM); | ||
1550 | BTFIXUPSET_CALL(flush_tlb_page, cypress_flush_tlb_page, BTFIXUPCALL_NORM); | ||
1551 | BTFIXUPSET_CALL(flush_tlb_range, cypress_flush_tlb_range, BTFIXUPCALL_NORM); | ||
1552 | |||
1553 | |||
1554 | BTFIXUPSET_CALL(__flush_page_to_ram, cypress_flush_page_to_ram, BTFIXUPCALL_NORM); | ||
1555 | BTFIXUPSET_CALL(flush_sig_insns, cypress_flush_sig_insns, BTFIXUPCALL_NOP); | ||
1556 | BTFIXUPSET_CALL(flush_page_for_dma, cypress_flush_page_for_dma, BTFIXUPCALL_NOP); | ||
1557 | |||
1558 | poke_srmmu = poke_cypress; | ||
1559 | } | ||
1560 | |||
1561 | static void __init init_cypress_604(void) | ||
1562 | { | ||
1563 | srmmu_name = "ROSS Cypress-604(UP)"; | ||
1564 | srmmu_modtype = Cypress; | ||
1565 | init_cypress_common(); | ||
1566 | } | ||
1567 | |||
1568 | static void __init init_cypress_605(unsigned long mrev) | ||
1569 | { | ||
1570 | srmmu_name = "ROSS Cypress-605(MP)"; | ||
1571 | if(mrev == 0xe) { | ||
1572 | srmmu_modtype = Cypress_vE; | ||
1573 | hwbug_bitmask |= HWBUG_COPYBACK_BROKEN; | ||
1574 | } else { | ||
1575 | if(mrev == 0xd) { | ||
1576 | srmmu_modtype = Cypress_vD; | ||
1577 | hwbug_bitmask |= HWBUG_ASIFLUSH_BROKEN; | ||
1578 | } else { | ||
1579 | srmmu_modtype = Cypress; | ||
1580 | } | ||
1581 | } | ||
1582 | init_cypress_common(); | ||
1583 | } | ||
1584 | |||
1585 | static void __init poke_swift(void) | ||
1586 | { | ||
1587 | unsigned long mreg; | ||
1588 | |||
1589 | /* Clear any crap from the cache or else... */ | ||
1590 | swift_flush_cache_all(); | ||
1591 | |||
1592 | /* Enable I & D caches */ | ||
1593 | mreg = srmmu_get_mmureg(); | ||
1594 | mreg |= (SWIFT_IE | SWIFT_DE); | ||
1595 | /* | ||
1596 | * The Swift branch folding logic is completely broken. At | ||
1597 | * trap time, if things are just right, it can mistakenly | ||
1598 | * think that a trap is coming from kernel mode when in fact | ||
1599 | * it is coming from user mode (it mis-executes the branch in | ||
1600 | * the trap code). So you see things like crashme completely | ||
1601 | * hosing your machine which is completely unacceptable. Turn | ||
1602 | * this shit off... nice job Fujitsu. | ||
1603 | */ | ||
1604 | mreg &= ~(SWIFT_BF); | ||
1605 | srmmu_set_mmureg(mreg); | ||
1606 | } | ||
1607 | |||
1608 | #define SWIFT_MASKID_ADDR 0x10003018 | ||
1609 | static void __init init_swift(void) | ||
1610 | { | ||
1611 | unsigned long swift_rev; | ||
1612 | |||
1613 | __asm__ __volatile__("lda [%1] %2, %0\n\t" | ||
1614 | "srl %0, 0x18, %0\n\t" : | ||
1615 | "=r" (swift_rev) : | ||
1616 | "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS)); | ||
1617 | srmmu_name = "Fujitsu Swift"; | ||
1618 | switch(swift_rev) { | ||
1619 | case 0x11: | ||
1620 | case 0x20: | ||
1621 | case 0x23: | ||
1622 | case 0x30: | ||
1623 | srmmu_modtype = Swift_lots_o_bugs; | ||
1624 | hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN); | ||
1625 | /* | ||
1626 | * Gee George, I wonder why Sun is so hush-hush about | ||
1627 | * this hardware bug... really braindamaged stuff going | ||
1628 | * on here. However I think we can find a way to avoid | ||
1629 | * all of the workaround overhead under Linux. Basically, | ||
1630 | * any page fault can cause kernel pages to become user | ||
1631 | * accessible (the mmu gets confused and clears some of | ||
1632 | * the ACC bits in kernel ptes). Aha, sounds pretty | ||
1633 | * horrible eh? But wait, after extensive testing it appears | ||
1634 | * that if you use pgd_t level large kernel pte's (like the | ||
1635 | * 4MB pages on the Pentium) the bug does not get tripped | ||
1636 | * at all. This avoids almost all of the major overhead. | ||
1637 | * Welcome to a world where your vendor tells you to | ||
1638 | * "apply this kernel patch" instead of "sorry for the | ||
1639 | * broken hardware, send it back and we'll give you | ||
1640 | * properly functioning parts." | ||
1641 | */ | ||
1642 | break; | ||
1643 | case 0x25: | ||
1644 | case 0x31: | ||
1645 | srmmu_modtype = Swift_bad_c; | ||
1646 | hwbug_bitmask |= HWBUG_KERN_CBITBROKEN; | ||
1647 | /* | ||
1648 | * You see Sun allude to this hardware bug but never | ||
1649 | * admit things directly, they'll say things like, | ||
1650 | * "the Swift chip cache problems" or similar. | ||
1651 | */ | ||
1652 | break; | ||
1653 | default: | ||
1654 | srmmu_modtype = Swift_ok; | ||
1655 | break; | ||
1656 | } | ||
1657 | |||
1658 | BTFIXUPSET_CALL(flush_cache_all, swift_flush_cache_all, BTFIXUPCALL_NORM); | ||
1659 | BTFIXUPSET_CALL(flush_cache_mm, swift_flush_cache_mm, BTFIXUPCALL_NORM); | ||
1660 | BTFIXUPSET_CALL(flush_cache_page, swift_flush_cache_page, BTFIXUPCALL_NORM); | ||
1661 | BTFIXUPSET_CALL(flush_cache_range, swift_flush_cache_range, BTFIXUPCALL_NORM); | ||
1662 | |||
1663 | |||
1664 | BTFIXUPSET_CALL(flush_tlb_all, swift_flush_tlb_all, BTFIXUPCALL_NORM); | ||
1665 | BTFIXUPSET_CALL(flush_tlb_mm, swift_flush_tlb_mm, BTFIXUPCALL_NORM); | ||
1666 | BTFIXUPSET_CALL(flush_tlb_page, swift_flush_tlb_page, BTFIXUPCALL_NORM); | ||
1667 | BTFIXUPSET_CALL(flush_tlb_range, swift_flush_tlb_range, BTFIXUPCALL_NORM); | ||
1668 | |||
1669 | BTFIXUPSET_CALL(__flush_page_to_ram, swift_flush_page_to_ram, BTFIXUPCALL_NORM); | ||
1670 | BTFIXUPSET_CALL(flush_sig_insns, swift_flush_sig_insns, BTFIXUPCALL_NORM); | ||
1671 | BTFIXUPSET_CALL(flush_page_for_dma, swift_flush_page_for_dma, BTFIXUPCALL_NORM); | ||
1672 | |||
1673 | BTFIXUPSET_CALL(update_mmu_cache, swift_update_mmu_cache, BTFIXUPCALL_NORM); | ||
1674 | |||
1675 | flush_page_for_dma_global = 0; | ||
1676 | |||
1677 | /* | ||
1678 | * Are you now convinced that the Swift is one of the | ||
1679 | * biggest VLSI abortions of all time? Bravo Fujitsu! | ||
1680 | * Fujitsu, the !#?!%$'d up processor people. I bet if | ||
1681 | * you examined the microcode of the Swift you'd find | ||
1682 | * XXX's all over the place. | ||
1683 | */ | ||
1684 | poke_srmmu = poke_swift; | ||
1685 | } | ||
1686 | |||
1687 | static void turbosparc_flush_cache_all(void) | ||
1688 | { | ||
1689 | flush_user_windows(); | ||
1690 | turbosparc_idflash_clear(); | ||
1691 | } | ||
1692 | |||
1693 | static void turbosparc_flush_cache_mm(struct mm_struct *mm) | ||
1694 | { | ||
1695 | FLUSH_BEGIN(mm) | ||
1696 | flush_user_windows(); | ||
1697 | turbosparc_idflash_clear(); | ||
1698 | FLUSH_END | ||
1699 | } | ||
1700 | |||
1701 | static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) | ||
1702 | { | ||
1703 | FLUSH_BEGIN(vma->vm_mm) | ||
1704 | flush_user_windows(); | ||
1705 | turbosparc_idflash_clear(); | ||
1706 | FLUSH_END | ||
1707 | } | ||
1708 | |||
1709 | static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page) | ||
1710 | { | ||
1711 | FLUSH_BEGIN(vma->vm_mm) | ||
1712 | flush_user_windows(); | ||
1713 | if (vma->vm_flags & VM_EXEC) | ||
1714 | turbosparc_flush_icache(); | ||
1715 | turbosparc_flush_dcache(); | ||
1716 | FLUSH_END | ||
1717 | } | ||
1718 | |||
1719 | /* TurboSparc is copy-back if we turn that mode on, but it does not work. */ | ||
1720 | static void turbosparc_flush_page_to_ram(unsigned long page) | ||
1721 | { | ||
1722 | #ifdef TURBOSPARC_WRITEBACK | ||
1723 | volatile unsigned long clear; | ||
1724 | |||
1725 | if (srmmu_hwprobe(page)) | ||
1726 | turbosparc_flush_page_cache(page); | ||
1727 | clear = srmmu_get_fstatus(); | ||
1728 | #endif | ||
1729 | } | ||
1730 | |||
1731 | static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr) | ||
1732 | { | ||
1733 | } | ||
1734 | |||
1735 | static void turbosparc_flush_page_for_dma(unsigned long page) | ||
1736 | { | ||
1737 | turbosparc_flush_dcache(); | ||
1738 | } | ||
1739 | |||
1740 | static void turbosparc_flush_tlb_all(void) | ||
1741 | { | ||
1742 | srmmu_flush_whole_tlb(); | ||
1743 | } | ||
1744 | |||
1745 | static void turbosparc_flush_tlb_mm(struct mm_struct *mm) | ||
1746 | { | ||
1747 | FLUSH_BEGIN(mm) | ||
1748 | srmmu_flush_whole_tlb(); | ||
1749 | FLUSH_END | ||
1750 | } | ||
1751 | |||
1752 | static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) | ||
1753 | { | ||
1754 | FLUSH_BEGIN(vma->vm_mm) | ||
1755 | srmmu_flush_whole_tlb(); | ||
1756 | FLUSH_END | ||
1757 | } | ||
1758 | |||
1759 | static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) | ||
1760 | { | ||
1761 | FLUSH_BEGIN(vma->vm_mm) | ||
1762 | srmmu_flush_whole_tlb(); | ||
1763 | FLUSH_END | ||
1764 | } | ||
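All of the TurboSparc flush routines funnel through FLUSH_BEGIN/FLUSH_END, defined earlier in this file; they simply skip the flush for address spaces that never received an MMU context, roughly:

#define FLUSH_BEGIN(mm)	if ((mm)->context != NO_CONTEXT) {
#define FLUSH_END	}

So a flush against a freshly created mm that has never run costs nothing.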
1765 | |||
1766 | |||
1767 | static void __init poke_turbosparc(void) | ||
1768 | { | ||
1769 | unsigned long mreg = srmmu_get_mmureg(); | ||
1770 | unsigned long ccreg; | ||
1771 | |||
1772 | /* Clear any crap from the cache or else... */ | ||
1773 | turbosparc_flush_cache_all(); | ||
1774 | mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* Temporarily disable I & D caches */ | ||
1775 | mreg &= ~(TURBOSPARC_PCENABLE); /* Don't check parity */ | ||
1776 | srmmu_set_mmureg(mreg); | ||
1777 | |||
1778 | ccreg = turbosparc_get_ccreg(); | ||
1779 | |||
1780 | #ifdef TURBOSPARC_WRITEBACK | ||
1781 | ccreg |= (TURBOSPARC_SNENABLE); /* Do DVMA snooping in Dcache */ | ||
1782 | ccreg &= ~(TURBOSPARC_uS2 | TURBOSPARC_WTENABLE); | ||
1783 | /* Write-back D-cache, emulate VLSI | ||
1784 | * abortion number three, not number one */ | ||
1785 | #else | ||
1786 | /* For now let's play safe, optimize later */ | ||
1787 | ccreg |= (TURBOSPARC_SNENABLE | TURBOSPARC_WTENABLE); | ||
1788 | /* Do DVMA snooping in Dcache, Write-thru D-cache */ | ||
1789 | ccreg &= ~(TURBOSPARC_uS2); | ||
1790 | /* Emulate VLSI abortion number three, not number one */ | ||
1791 | #endif | ||
1792 | |||
1793 | switch (ccreg & 7) { | ||
1794 | case 0: /* No SE cache */ | ||
1795 | case 7: /* Test mode */ | ||
1796 | break; | ||
1797 | default: | ||
1798 | ccreg |= (TURBOSPARC_SCENABLE); | ||
1799 | } | ||
1800 | turbosparc_set_ccreg (ccreg); | ||
1801 | |||
1802 | mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */ | ||
1803 | mreg |= (TURBOSPARC_ICSNOOP); /* Icache snooping on */ | ||
1804 | srmmu_set_mmureg(mreg); | ||
1805 | } | ||
1806 | |||
1807 | static void __init init_turbosparc(void) | ||
1808 | { | ||
1809 | srmmu_name = "Fujitsu TurboSparc"; | ||
1810 | srmmu_modtype = TurboSparc; | ||
1811 | |||
1812 | BTFIXUPSET_CALL(flush_cache_all, turbosparc_flush_cache_all, BTFIXUPCALL_NORM); | ||
1813 | BTFIXUPSET_CALL(flush_cache_mm, turbosparc_flush_cache_mm, BTFIXUPCALL_NORM); | ||
1814 | BTFIXUPSET_CALL(flush_cache_page, turbosparc_flush_cache_page, BTFIXUPCALL_NORM); | ||
1815 | BTFIXUPSET_CALL(flush_cache_range, turbosparc_flush_cache_range, BTFIXUPCALL_NORM); | ||
1816 | |||
1817 | BTFIXUPSET_CALL(flush_tlb_all, turbosparc_flush_tlb_all, BTFIXUPCALL_NORM); | ||
1818 | BTFIXUPSET_CALL(flush_tlb_mm, turbosparc_flush_tlb_mm, BTFIXUPCALL_NORM); | ||
1819 | BTFIXUPSET_CALL(flush_tlb_page, turbosparc_flush_tlb_page, BTFIXUPCALL_NORM); | ||
1820 | BTFIXUPSET_CALL(flush_tlb_range, turbosparc_flush_tlb_range, BTFIXUPCALL_NORM); | ||
1821 | |||
1822 | BTFIXUPSET_CALL(__flush_page_to_ram, turbosparc_flush_page_to_ram, BTFIXUPCALL_NORM); | ||
1823 | |||
1824 | BTFIXUPSET_CALL(flush_sig_insns, turbosparc_flush_sig_insns, BTFIXUPCALL_NOP); | ||
1825 | BTFIXUPSET_CALL(flush_page_for_dma, turbosparc_flush_page_for_dma, BTFIXUPCALL_NORM); | ||
1826 | |||
1827 | poke_srmmu = poke_turbosparc; | ||
1828 | } | ||
1829 | |||
1830 | static void __init poke_tsunami(void) | ||
1831 | { | ||
1832 | unsigned long mreg = srmmu_get_mmureg(); | ||
1833 | |||
1834 | tsunami_flush_icache(); | ||
1835 | tsunami_flush_dcache(); | ||
1836 | mreg &= ~TSUNAMI_ITD; | ||
1837 | mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB); | ||
1838 | srmmu_set_mmureg(mreg); | ||
1839 | } | ||
1840 | |||
1841 | static void __init init_tsunami(void) | ||
1842 | { | ||
1843 | /* | ||
1844 | * Tsunami's pretty sane, Sun and TI actually got it | ||
1845 | * somewhat right this time. Fujitsu should have | ||
1846 | * taken some lessons from them. | ||
1847 | */ | ||
1848 | |||
1849 | srmmu_name = "TI Tsunami"; | ||
1850 | srmmu_modtype = Tsunami; | ||
1851 | |||
1852 | BTFIXUPSET_CALL(flush_cache_all, tsunami_flush_cache_all, BTFIXUPCALL_NORM); | ||
1853 | BTFIXUPSET_CALL(flush_cache_mm, tsunami_flush_cache_mm, BTFIXUPCALL_NORM); | ||
1854 | BTFIXUPSET_CALL(flush_cache_page, tsunami_flush_cache_page, BTFIXUPCALL_NORM); | ||
1855 | BTFIXUPSET_CALL(flush_cache_range, tsunami_flush_cache_range, BTFIXUPCALL_NORM); | ||
1856 | |||
1857 | |||
1858 | BTFIXUPSET_CALL(flush_tlb_all, tsunami_flush_tlb_all, BTFIXUPCALL_NORM); | ||
1859 | BTFIXUPSET_CALL(flush_tlb_mm, tsunami_flush_tlb_mm, BTFIXUPCALL_NORM); | ||
1860 | BTFIXUPSET_CALL(flush_tlb_page, tsunami_flush_tlb_page, BTFIXUPCALL_NORM); | ||
1861 | BTFIXUPSET_CALL(flush_tlb_range, tsunami_flush_tlb_range, BTFIXUPCALL_NORM); | ||
1862 | |||
1863 | BTFIXUPSET_CALL(__flush_page_to_ram, tsunami_flush_page_to_ram, BTFIXUPCALL_NOP); | ||
1864 | BTFIXUPSET_CALL(flush_sig_insns, tsunami_flush_sig_insns, BTFIXUPCALL_NORM); | ||
1865 | BTFIXUPSET_CALL(flush_page_for_dma, tsunami_flush_page_for_dma, BTFIXUPCALL_NORM); | ||
1866 | |||
1867 | poke_srmmu = poke_tsunami; | ||
1868 | |||
1869 | tsunami_setup_blockops(); | ||
1870 | } | ||
1871 | |||
1872 | static void __init poke_viking(void) | ||
1873 | { | ||
1874 | unsigned long mreg = srmmu_get_mmureg(); | ||
1875 | static int smp_catch; | ||
1876 | |||
1877 | if(viking_mxcc_present) { | ||
1878 | unsigned long mxcc_control = mxcc_get_creg(); | ||
1879 | |||
1880 | mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE); | ||
1881 | mxcc_control &= ~(MXCC_CTL_RRC); | ||
1882 | mxcc_set_creg(mxcc_control); | ||
1883 | |||
1884 | /* | ||
1885 | * We don't need memory parity checks. | ||
1886 | * XXX This is a mess, have to dig out later. ecd. | ||
1887 | viking_mxcc_turn_off_parity(&mreg, &mxcc_control); | ||
1888 | */ | ||
1889 | |||
1890 | /* We do cache ptables on MXCC. */ | ||
1891 | mreg |= VIKING_TCENABLE; | ||
1892 | } else { | ||
1893 | unsigned long bpreg; | ||
1894 | |||
1895 | mreg &= ~(VIKING_TCENABLE); | ||
1896 | if(smp_catch++) { | ||
1897 | /* Must disable mixed-cmd mode here for the other CPUs. */ | ||
1898 | bpreg = viking_get_bpreg(); | ||
1899 | bpreg &= ~(VIKING_ACTION_MIX); | ||
1900 | viking_set_bpreg(bpreg); | ||
1901 | |||
1902 | /* Just in case PROM does something funny. */ | ||
1903 | msi_set_sync(); | ||
1904 | } | ||
1905 | } | ||
1906 | |||
1907 | mreg |= VIKING_SPENABLE; | ||
1908 | mreg |= (VIKING_ICENABLE | VIKING_DCENABLE); | ||
1909 | mreg |= VIKING_SBENABLE; | ||
1910 | mreg &= ~(VIKING_ACENABLE); | ||
1911 | srmmu_set_mmureg(mreg); | ||
1912 | |||
1913 | #ifdef CONFIG_SMP | ||
1914 | /* Avoid unnecessary cross calls. */ | ||
1915 | BTFIXUPCOPY_CALL(flush_cache_all, local_flush_cache_all); | ||
1916 | BTFIXUPCOPY_CALL(flush_cache_mm, local_flush_cache_mm); | ||
1917 | BTFIXUPCOPY_CALL(flush_cache_range, local_flush_cache_range); | ||
1918 | BTFIXUPCOPY_CALL(flush_cache_page, local_flush_cache_page); | ||
1919 | BTFIXUPCOPY_CALL(__flush_page_to_ram, local_flush_page_to_ram); | ||
1920 | BTFIXUPCOPY_CALL(flush_sig_insns, local_flush_sig_insns); | ||
1921 | BTFIXUPCOPY_CALL(flush_page_for_dma, local_flush_page_for_dma); | ||
1922 | btfixup(); | ||
1923 | #endif | ||
1924 | } | ||
1925 | |||
1926 | static void __init init_viking(void) | ||
1927 | { | ||
1928 | unsigned long mreg = srmmu_get_mmureg(); | ||
1929 | |||
1930 | /* Ahhh, the viking. SRMMU VLSI abortion number two... */ | ||
1931 | if(mreg & VIKING_MMODE) { | ||
1932 | srmmu_name = "TI Viking"; | ||
1933 | viking_mxcc_present = 0; | ||
1934 | msi_set_sync(); | ||
1935 | |||
1936 | BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM); | ||
1937 | BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM); | ||
1938 | BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM); | ||
1939 | |||
1940 | /* | ||
1941 | * We need this to make sure the old Viking takes no hits | ||
1942 | * in its cache for DMA snoops, to work around the | ||
1943 | * "load from non-cacheable memory" interrupt bug. | ||
1944 | * This is only necessary because of the new way in | ||
1945 | * which we use the IOMMU. | ||
1946 | */ | ||
1947 | BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page, BTFIXUPCALL_NORM); | ||
1948 | |||
1949 | flush_page_for_dma_global = 0; | ||
1950 | } else { | ||
1951 | srmmu_name = "TI Viking/MXCC"; | ||
1952 | viking_mxcc_present = 1; | ||
1953 | |||
1954 | srmmu_cache_pagetables = 1; | ||
1955 | |||
1956 | /* MXCC vikings lack the DMA snooping bug. */ | ||
1957 | BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page_for_dma, BTFIXUPCALL_NOP); | ||
1958 | } | ||
1959 | |||
1960 | BTFIXUPSET_CALL(flush_cache_all, viking_flush_cache_all, BTFIXUPCALL_NORM); | ||
1961 | BTFIXUPSET_CALL(flush_cache_mm, viking_flush_cache_mm, BTFIXUPCALL_NORM); | ||
1962 | BTFIXUPSET_CALL(flush_cache_page, viking_flush_cache_page, BTFIXUPCALL_NORM); | ||
1963 | BTFIXUPSET_CALL(flush_cache_range, viking_flush_cache_range, BTFIXUPCALL_NORM); | ||
1964 | |||
1965 | #ifdef CONFIG_SMP | ||
1966 | if (sparc_cpu_model == sun4d) { | ||
1967 | BTFIXUPSET_CALL(flush_tlb_all, sun4dsmp_flush_tlb_all, BTFIXUPCALL_NORM); | ||
1968 | BTFIXUPSET_CALL(flush_tlb_mm, sun4dsmp_flush_tlb_mm, BTFIXUPCALL_NORM); | ||
1969 | BTFIXUPSET_CALL(flush_tlb_page, sun4dsmp_flush_tlb_page, BTFIXUPCALL_NORM); | ||
1970 | BTFIXUPSET_CALL(flush_tlb_range, sun4dsmp_flush_tlb_range, BTFIXUPCALL_NORM); | ||
1971 | } else | ||
1972 | #endif | ||
1973 | { | ||
1974 | BTFIXUPSET_CALL(flush_tlb_all, viking_flush_tlb_all, BTFIXUPCALL_NORM); | ||
1975 | BTFIXUPSET_CALL(flush_tlb_mm, viking_flush_tlb_mm, BTFIXUPCALL_NORM); | ||
1976 | BTFIXUPSET_CALL(flush_tlb_page, viking_flush_tlb_page, BTFIXUPCALL_NORM); | ||
1977 | BTFIXUPSET_CALL(flush_tlb_range, viking_flush_tlb_range, BTFIXUPCALL_NORM); | ||
1978 | } | ||
1979 | |||
1980 | BTFIXUPSET_CALL(__flush_page_to_ram, viking_flush_page_to_ram, BTFIXUPCALL_NOP); | ||
1981 | BTFIXUPSET_CALL(flush_sig_insns, viking_flush_sig_insns, BTFIXUPCALL_NOP); | ||
1982 | |||
1983 | poke_srmmu = poke_viking; | ||
1984 | } | ||
1985 | |||
1986 | /* Probe for the srmmu chip version. */ | ||
1987 | static void __init get_srmmu_type(void) | ||
1988 | { | ||
1989 | unsigned long mreg, psr; | ||
1990 | unsigned long mod_typ, mod_rev, psr_typ, psr_vers; | ||
1991 | |||
1992 | srmmu_modtype = SRMMU_INVAL_MOD; | ||
1993 | hwbug_bitmask = 0; | ||
1994 | |||
1995 | mreg = srmmu_get_mmureg(); psr = get_psr(); | ||
1996 | mod_typ = (mreg & 0xf0000000) >> 28; | ||
1997 | mod_rev = (mreg & 0x0f000000) >> 24; | ||
1998 | psr_typ = (psr >> 28) & 0xf; | ||
1999 | psr_vers = (psr >> 24) & 0xf; | ||
2000 | |||
2001 | /* First, check for HyperSparc or Cypress. */ | ||
2002 | if(mod_typ == 1) { | ||
2003 | switch(mod_rev) { | ||
2004 | case 7: | ||
2005 | /* UP or MP Hypersparc */ | ||
2006 | init_hypersparc(); | ||
2007 | break; | ||
2008 | case 0: | ||
2009 | case 2: | ||
2010 | /* Uniprocessor Cypress */ | ||
2011 | init_cypress_604(); | ||
2012 | break; | ||
2013 | case 10: | ||
2014 | case 11: | ||
2015 | case 12: | ||
2016 | /* _REALLY OLD_ Cypress MP chips... */ | ||
2017 | case 13: | ||
2018 | case 14: | ||
2019 | case 15: | ||
2020 | /* MP Cypress mmu/cache-controller */ | ||
2021 | init_cypress_605(mod_rev); | ||
2022 | break; | ||
2023 | default: | ||
2024 | /* Some other Cypress revision, assume a 605. */ | ||
2025 | init_cypress_605(mod_rev); | ||
2026 | break; | ||
2027 | } | ||
2028 | return; | ||
2029 | } | ||
2030 | |||
2031 | /* | ||
2032 | * Now Fujitsu TurboSparc. It might be running in Swift | ||
2033 | * emulation mode, so we will check for that later... | ||
2034 | */ | ||
2035 | if (psr_typ == 0 && psr_vers == 5) { | ||
2036 | init_turbosparc(); | ||
2037 | return; | ||
2038 | } | ||
2039 | |||
2040 | /* Next check for Fujitsu Swift. */ | ||
2041 | if(psr_typ == 0 && psr_vers == 4) { | ||
2042 | int cpunode; | ||
2043 | char node_str[128]; | ||
2044 | |||
2045 | /* Check whether it is really a TurboSparc emulating Swift... */ | ||
2046 | cpunode = prom_getchild(prom_root_node); | ||
2047 | while((cpunode = prom_getsibling(cpunode)) != 0) { | ||
2048 | prom_getstring(cpunode, "device_type", node_str, sizeof(node_str)); | ||
2049 | if(!strcmp(node_str, "cpu")) { | ||
2050 | if (!prom_getintdefault(cpunode, "psr-implementation", 1) && | ||
2051 | prom_getintdefault(cpunode, "psr-version", 1) == 5) { | ||
2052 | init_turbosparc(); | ||
2053 | return; | ||
2054 | } | ||
2055 | break; | ||
2056 | } | ||
2057 | } | ||
2058 | |||
2059 | init_swift(); | ||
2060 | return; | ||
2061 | } | ||
2062 | |||
2063 | /* Now the Viking family of srmmu. */ | ||
2064 | if(psr_typ == 4 && | ||
2065 | ((psr_vers == 0) || | ||
2066 | ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) { | ||
2067 | init_viking(); | ||
2068 | return; | ||
2069 | } | ||
2070 | |||
2071 | /* Finally the Tsunami. */ | ||
2072 | if(psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) { | ||
2073 | init_tsunami(); | ||
2074 | return; | ||
2075 | } | ||
2076 | |||
2077 | /* Oh well */ | ||
2078 | srmmu_is_bad(); | ||
2079 | } | ||
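The whole probe is driven by the IMPL/VERS nibbles of the MMU control register and the PSR. A decode example with a hypothetical PSR value (the value below is made up for illustration):

#include <stdio.h>

int main(void)
{
	unsigned long psr = 0x04000000UL;	/* hypothetical */
	unsigned long psr_typ  = (psr >> 28) & 0xf;
	unsigned long psr_vers = (psr >> 24) & 0xf;

	/* typ=0 vers=4 is the Swift case above (unless a TurboSparc
	 * is emulating a Swift, which the PROM check resolves). */
	printf("psr_typ=%lu psr_vers=%lu\n", psr_typ, psr_vers);
	return 0;
}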
2080 | |||
2081 | /* don't laugh, static pagetables */ | ||
2082 | static void srmmu_check_pgt_cache(int low, int high) | ||
2083 | { | ||
2084 | } | ||
2085 | |||
2086 | extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme, | ||
2087 | tsetup_mmu_patchme, rtrap_mmu_patchme; | ||
2088 | |||
2089 | extern unsigned long spwin_srmmu_stackchk, srmmu_fwin_stackchk, | ||
2090 | tsetup_srmmu_stackchk, srmmu_rett_stackchk; | ||
2091 | |||
2092 | extern unsigned long srmmu_fault; | ||
2093 | |||
2094 | #define PATCH_BRANCH(insn, dest) do { \ | ||
2095 | iaddr = &(insn); \ | ||
2096 | daddr = &(dest); \ | ||
2097 | *iaddr = SPARC_BRANCH((unsigned long) daddr, (unsigned long) iaddr); \ | ||
2098 | } while(0) | ||
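SPARC_BRANCH() presumably assembles a "branch always" instruction in place: the format-2 opcode bits plus the signed word displacement from the patch site to the handler. A sketch under that assumption; this is an illustrative stand-in, not the kernel's actual definition:

/* SPARC "ba": op=00, annul=0, cond=1000, op2=010 => 0x10800000,
 * with the (dest - site) byte distance divided by 4 in the low
 * 22 bits. */
#define BRANCH_SKETCH(dest, site) \
	(0x10800000u | ((((dest) - (site)) >> 2) & 0x003fffffu))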
2099 | |||
2100 | static void __init patch_window_trap_handlers(void) | ||
2101 | { | ||
2102 | unsigned long *iaddr, *daddr; | ||
2103 | |||
2104 | PATCH_BRANCH(spwin_mmu_patchme, spwin_srmmu_stackchk); | ||
2105 | PATCH_BRANCH(fwin_mmu_patchme, srmmu_fwin_stackchk); | ||
2106 | PATCH_BRANCH(tsetup_mmu_patchme, tsetup_srmmu_stackchk); | ||
2107 | PATCH_BRANCH(rtrap_mmu_patchme, srmmu_rett_stackchk); | ||
2108 | PATCH_BRANCH(sparc_ttable[SP_TRAP_TFLT].inst_three, srmmu_fault); | ||
2109 | PATCH_BRANCH(sparc_ttable[SP_TRAP_DFLT].inst_three, srmmu_fault); | ||
2110 | PATCH_BRANCH(sparc_ttable[SP_TRAP_DACC].inst_three, srmmu_fault); | ||
2111 | } | ||
2112 | |||
2113 | #ifdef CONFIG_SMP | ||
2114 | /* Local cross-calls. */ | ||
2115 | static void smp_flush_page_for_dma(unsigned long page) | ||
2116 | { | ||
2117 | xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_for_dma), page); | ||
2118 | local_flush_page_for_dma(page); | ||
2119 | } | ||
2120 | |||
2121 | #endif | ||
2122 | |||
2123 | static pte_t srmmu_pgoff_to_pte(unsigned long pgoff) | ||
2124 | { | ||
2125 | return __pte((pgoff << SRMMU_PTE_FILE_SHIFT) | SRMMU_FILE); | ||
2126 | } | ||
2127 | |||
2128 | static unsigned long srmmu_pte_to_pgoff(pte_t pte) | ||
2129 | { | ||
2130 | return pte_val(pte) >> SRMMU_PTE_FILE_SHIFT; | ||
2131 | } | ||
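These two helpers implement the nonlinear-file-mapping encoding: the file page offset is parked in the pte with the SRMMU_FILE software bit set, and recovered by the reverse shift. A round-trip check with stand-in constants (the real SRMMU_PTE_FILE_SHIFT and SRMMU_FILE values live in the srmmu headers):

#include <assert.h>

#define FILE_SHIFT	8	/* stand-in for SRMMU_PTE_FILE_SHIFT */
#define FILE_BIT	0x40	/* stand-in for SRMMU_FILE */

int main(void)
{
	unsigned long pgoff = 12345;
	unsigned long pte = (pgoff << FILE_SHIFT) | FILE_BIT;

	/* The marker bit sits below FILE_SHIFT, so it shifts away. */
	assert((pte >> FILE_SHIFT) == pgoff);
	return 0;
}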
2132 | |||
2133 | /* Load up routines and constants for sun4m and sun4d mmu */ | ||
2134 | void __init ld_mmu_srmmu(void) | ||
2135 | { | ||
2136 | extern void ld_mmu_iommu(void); | ||
2137 | extern void ld_mmu_iounit(void); | ||
2138 | extern void ___xchg32_sun4md(void); | ||
2139 | |||
2140 | BTFIXUPSET_SIMM13(pgdir_shift, SRMMU_PGDIR_SHIFT); | ||
2141 | BTFIXUPSET_SETHI(pgdir_size, SRMMU_PGDIR_SIZE); | ||
2142 | BTFIXUPSET_SETHI(pgdir_mask, SRMMU_PGDIR_MASK); | ||
2143 | |||
2144 | BTFIXUPSET_SIMM13(ptrs_per_pmd, SRMMU_PTRS_PER_PMD); | ||
2145 | BTFIXUPSET_SIMM13(ptrs_per_pgd, SRMMU_PTRS_PER_PGD); | ||
2146 | |||
2147 | BTFIXUPSET_INT(page_none, pgprot_val(SRMMU_PAGE_NONE)); | ||
2148 | BTFIXUPSET_INT(page_shared, pgprot_val(SRMMU_PAGE_SHARED)); | ||
2149 | BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY)); | ||
2150 | BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY)); | ||
2151 | BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL)); | ||
2152 | page_kernel = pgprot_val(SRMMU_PAGE_KERNEL); | ||
2153 | pg_iobits = SRMMU_VALID | SRMMU_WRITE | SRMMU_REF; | ||
2154 | |||
2155 | /* Functions */ | ||
2156 | #ifndef CONFIG_SMP | ||
2157 | BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4md, BTFIXUPCALL_SWAPG1G2); | ||
2158 | #endif | ||
2159 | BTFIXUPSET_CALL(do_check_pgt_cache, srmmu_check_pgt_cache, BTFIXUPCALL_NOP); | ||
2160 | |||
2161 | BTFIXUPSET_CALL(set_pte, srmmu_set_pte, BTFIXUPCALL_SWAPO0O1); | ||
2162 | BTFIXUPSET_CALL(switch_mm, srmmu_switch_mm, BTFIXUPCALL_NORM); | ||
2163 | |||
2164 | BTFIXUPSET_CALL(pte_pfn, srmmu_pte_pfn, BTFIXUPCALL_NORM); | ||
2165 | BTFIXUPSET_CALL(pmd_page, srmmu_pmd_page, BTFIXUPCALL_NORM); | ||
2166 | BTFIXUPSET_CALL(pgd_page, srmmu_pgd_page, BTFIXUPCALL_NORM); | ||
2167 | |||
2168 | BTFIXUPSET_SETHI(none_mask, 0xF0000000); | ||
2169 | |||
2170 | BTFIXUPSET_CALL(pte_present, srmmu_pte_present, BTFIXUPCALL_NORM); | ||
2171 | BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_SWAPO0G0); | ||
2172 | BTFIXUPSET_CALL(pte_read, srmmu_pte_read, BTFIXUPCALL_NORM); | ||
2173 | |||
2174 | BTFIXUPSET_CALL(pmd_bad, srmmu_pmd_bad, BTFIXUPCALL_NORM); | ||
2175 | BTFIXUPSET_CALL(pmd_present, srmmu_pmd_present, BTFIXUPCALL_NORM); | ||
2176 | BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_SWAPO0G0); | ||
2177 | |||
2178 | BTFIXUPSET_CALL(pgd_none, srmmu_pgd_none, BTFIXUPCALL_NORM); | ||
2179 | BTFIXUPSET_CALL(pgd_bad, srmmu_pgd_bad, BTFIXUPCALL_NORM); | ||
2180 | BTFIXUPSET_CALL(pgd_present, srmmu_pgd_present, BTFIXUPCALL_NORM); | ||
2181 | BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_SWAPO0G0); | ||
2182 | |||
2183 | BTFIXUPSET_CALL(mk_pte, srmmu_mk_pte, BTFIXUPCALL_NORM); | ||
2184 | BTFIXUPSET_CALL(mk_pte_phys, srmmu_mk_pte_phys, BTFIXUPCALL_NORM); | ||
2185 | BTFIXUPSET_CALL(mk_pte_io, srmmu_mk_pte_io, BTFIXUPCALL_NORM); | ||
2186 | BTFIXUPSET_CALL(pgd_set, srmmu_pgd_set, BTFIXUPCALL_NORM); | ||
2187 | BTFIXUPSET_CALL(pmd_set, srmmu_pmd_set, BTFIXUPCALL_NORM); | ||
2188 | BTFIXUPSET_CALL(pmd_populate, srmmu_pmd_populate, BTFIXUPCALL_NORM); | ||
2189 | |||
2190 | BTFIXUPSET_INT(pte_modify_mask, SRMMU_CHG_MASK); | ||
2191 | BTFIXUPSET_CALL(pmd_offset, srmmu_pmd_offset, BTFIXUPCALL_NORM); | ||
2192 | BTFIXUPSET_CALL(pte_offset_kernel, srmmu_pte_offset, BTFIXUPCALL_NORM); | ||
2193 | |||
2194 | BTFIXUPSET_CALL(free_pte_fast, srmmu_free_pte_fast, BTFIXUPCALL_NORM); | ||
2195 | BTFIXUPSET_CALL(pte_free, srmmu_pte_free, BTFIXUPCALL_NORM); | ||
2196 | BTFIXUPSET_CALL(pte_alloc_one_kernel, srmmu_pte_alloc_one_kernel, BTFIXUPCALL_NORM); | ||
2197 | BTFIXUPSET_CALL(pte_alloc_one, srmmu_pte_alloc_one, BTFIXUPCALL_NORM); | ||
2198 | BTFIXUPSET_CALL(free_pmd_fast, srmmu_pmd_free, BTFIXUPCALL_NORM); | ||
2199 | BTFIXUPSET_CALL(pmd_alloc_one, srmmu_pmd_alloc_one, BTFIXUPCALL_NORM); | ||
2200 | BTFIXUPSET_CALL(free_pgd_fast, srmmu_free_pgd_fast, BTFIXUPCALL_NORM); | ||
2201 | BTFIXUPSET_CALL(get_pgd_fast, srmmu_get_pgd_fast, BTFIXUPCALL_NORM); | ||
2202 | |||
2203 | BTFIXUPSET_HALF(pte_writei, SRMMU_WRITE); | ||
2204 | BTFIXUPSET_HALF(pte_dirtyi, SRMMU_DIRTY); | ||
2205 | BTFIXUPSET_HALF(pte_youngi, SRMMU_REF); | ||
2206 | BTFIXUPSET_HALF(pte_filei, SRMMU_FILE); | ||
2207 | BTFIXUPSET_HALF(pte_wrprotecti, SRMMU_WRITE); | ||
2208 | BTFIXUPSET_HALF(pte_mkcleani, SRMMU_DIRTY); | ||
2209 | BTFIXUPSET_HALF(pte_mkoldi, SRMMU_REF); | ||
2210 | BTFIXUPSET_CALL(pte_mkwrite, srmmu_pte_mkwrite, BTFIXUPCALL_ORINT(SRMMU_WRITE)); | ||
2211 | BTFIXUPSET_CALL(pte_mkdirty, srmmu_pte_mkdirty, BTFIXUPCALL_ORINT(SRMMU_DIRTY)); | ||
2212 | BTFIXUPSET_CALL(pte_mkyoung, srmmu_pte_mkyoung, BTFIXUPCALL_ORINT(SRMMU_REF)); | ||
2213 | BTFIXUPSET_CALL(update_mmu_cache, srmmu_update_mmu_cache, BTFIXUPCALL_NOP); | ||
2214 | BTFIXUPSET_CALL(destroy_context, srmmu_destroy_context, BTFIXUPCALL_NORM); | ||
2215 | |||
2216 | BTFIXUPSET_CALL(sparc_mapiorange, srmmu_mapiorange, BTFIXUPCALL_NORM); | ||
2217 | BTFIXUPSET_CALL(sparc_unmapiorange, srmmu_unmapiorange, BTFIXUPCALL_NORM); | ||
2218 | |||
2219 | BTFIXUPSET_CALL(__swp_type, srmmu_swp_type, BTFIXUPCALL_NORM); | ||
2220 | BTFIXUPSET_CALL(__swp_offset, srmmu_swp_offset, BTFIXUPCALL_NORM); | ||
2221 | BTFIXUPSET_CALL(__swp_entry, srmmu_swp_entry, BTFIXUPCALL_NORM); | ||
2222 | |||
2223 | BTFIXUPSET_CALL(mmu_info, srmmu_mmu_info, BTFIXUPCALL_NORM); | ||
2224 | |||
2225 | BTFIXUPSET_CALL(alloc_thread_info, srmmu_alloc_thread_info, BTFIXUPCALL_NORM); | ||
2226 | BTFIXUPSET_CALL(free_thread_info, srmmu_free_thread_info, BTFIXUPCALL_NORM); | ||
2227 | |||
2228 | BTFIXUPSET_CALL(pte_to_pgoff, srmmu_pte_to_pgoff, BTFIXUPCALL_NORM); | ||
2229 | BTFIXUPSET_CALL(pgoff_to_pte, srmmu_pgoff_to_pte, BTFIXUPCALL_NORM); | ||
2230 | |||
2231 | get_srmmu_type(); | ||
2232 | patch_window_trap_handlers(); | ||
2233 | |||
2234 | #ifdef CONFIG_SMP | ||
2235 | /* El switcheroo... */ | ||
2236 | |||
2237 | BTFIXUPCOPY_CALL(local_flush_cache_all, flush_cache_all); | ||
2238 | BTFIXUPCOPY_CALL(local_flush_cache_mm, flush_cache_mm); | ||
2239 | BTFIXUPCOPY_CALL(local_flush_cache_range, flush_cache_range); | ||
2240 | BTFIXUPCOPY_CALL(local_flush_cache_page, flush_cache_page); | ||
2241 | BTFIXUPCOPY_CALL(local_flush_tlb_all, flush_tlb_all); | ||
2242 | BTFIXUPCOPY_CALL(local_flush_tlb_mm, flush_tlb_mm); | ||
2243 | BTFIXUPCOPY_CALL(local_flush_tlb_range, flush_tlb_range); | ||
2244 | BTFIXUPCOPY_CALL(local_flush_tlb_page, flush_tlb_page); | ||
2245 | BTFIXUPCOPY_CALL(local_flush_page_to_ram, __flush_page_to_ram); | ||
2246 | BTFIXUPCOPY_CALL(local_flush_sig_insns, flush_sig_insns); | ||
2247 | BTFIXUPCOPY_CALL(local_flush_page_for_dma, flush_page_for_dma); | ||
2248 | |||
2249 | BTFIXUPSET_CALL(flush_cache_all, smp_flush_cache_all, BTFIXUPCALL_NORM); | ||
2250 | BTFIXUPSET_CALL(flush_cache_mm, smp_flush_cache_mm, BTFIXUPCALL_NORM); | ||
2251 | BTFIXUPSET_CALL(flush_cache_range, smp_flush_cache_range, BTFIXUPCALL_NORM); | ||
2252 | BTFIXUPSET_CALL(flush_cache_page, smp_flush_cache_page, BTFIXUPCALL_NORM); | ||
2253 | if (sparc_cpu_model != sun4d) { | ||
2254 | BTFIXUPSET_CALL(flush_tlb_all, smp_flush_tlb_all, BTFIXUPCALL_NORM); | ||
2255 | BTFIXUPSET_CALL(flush_tlb_mm, smp_flush_tlb_mm, BTFIXUPCALL_NORM); | ||
2256 | BTFIXUPSET_CALL(flush_tlb_range, smp_flush_tlb_range, BTFIXUPCALL_NORM); | ||
2257 | BTFIXUPSET_CALL(flush_tlb_page, smp_flush_tlb_page, BTFIXUPCALL_NORM); | ||
2258 | } | ||
2259 | BTFIXUPSET_CALL(__flush_page_to_ram, smp_flush_page_to_ram, BTFIXUPCALL_NORM); | ||
2260 | BTFIXUPSET_CALL(flush_sig_insns, smp_flush_sig_insns, BTFIXUPCALL_NORM); | ||
2261 | BTFIXUPSET_CALL(flush_page_for_dma, smp_flush_page_for_dma, BTFIXUPCALL_NORM); | ||
2262 | #endif | ||
2263 | |||
2264 | if (sparc_cpu_model == sun4d) | ||
2265 | ld_mmu_iounit(); | ||
2266 | else | ||
2267 | ld_mmu_iommu(); | ||
2268 | #ifdef CONFIG_SMP | ||
2269 | if (sparc_cpu_model == sun4d) | ||
2270 | sun4d_init_smp(); | ||
2271 | else | ||
2272 | sun4m_init_smp(); | ||
2273 | #endif | ||
2274 | } | ||
diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c new file mode 100644 index 000000000000..1d560390e282 --- /dev/null +++ b/arch/sparc/mm/sun4c.c | |||
@@ -0,0 +1,2276 @@ | |||
1 | /* $Id: sun4c.c,v 1.212 2001/12/21 04:56:15 davem Exp $ | ||
2 | * sun4c.c: Doing in software what should be done in hardware. | ||
3 | * | ||
4 | * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) | ||
5 | * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) | ||
6 | * Copyright (C) 1996 Andrew Tridgell (Andrew.Tridgell@anu.edu.au) | ||
7 | * Copyright (C) 1997-2000 Anton Blanchard (anton@samba.org) | ||
8 | * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | ||
9 | */ | ||
10 | |||
11 | #define NR_TASK_BUCKETS 512 | ||
12 | |||
13 | #include <linux/config.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/mm.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/bootmem.h> | ||
18 | #include <linux/highmem.h> | ||
19 | #include <linux/fs.h> | ||
20 | #include <linux/seq_file.h> | ||
21 | |||
22 | #include <asm/scatterlist.h> | ||
23 | #include <asm/page.h> | ||
24 | #include <asm/pgalloc.h> | ||
25 | #include <asm/pgtable.h> | ||
26 | #include <asm/vaddrs.h> | ||
27 | #include <asm/idprom.h> | ||
28 | #include <asm/machines.h> | ||
29 | #include <asm/memreg.h> | ||
30 | #include <asm/processor.h> | ||
31 | #include <asm/auxio.h> | ||
32 | #include <asm/io.h> | ||
33 | #include <asm/oplib.h> | ||
34 | #include <asm/openprom.h> | ||
35 | #include <asm/mmu_context.h> | ||
36 | #include <asm/sun4paddr.h> | ||
37 | #include <asm/highmem.h> | ||
38 | #include <asm/btfixup.h> | ||
39 | #include <asm/cacheflush.h> | ||
40 | #include <asm/tlbflush.h> | ||
41 | |||
42 | /* Because of our dynamic kernel TLB miss strategy, and how | ||
43 | * our DVMA mapping allocation works, you _MUST_: | ||
44 | * | ||
45 | * 1) Disable interrupts _and_ not touch any dynamic kernel | ||
46 | * memory while messing with kernel MMU state. By | ||
47 | * dynamic memory I mean any object which is not in | ||
48 | * the kernel image itself or a thread_union (both of | ||
49 | * which are locked into the MMU). | ||
50 | * 2) Disable interrupts while messing with user MMU state. | ||
51 | */ | ||
52 | |||
53 | extern int num_segmaps, num_contexts; | ||
54 | |||
55 | extern unsigned long page_kernel; | ||
56 | |||
57 | #ifdef CONFIG_SUN4 | ||
58 | #define SUN4C_VAC_SIZE sun4c_vacinfo.num_bytes | ||
59 | #else | ||
60 | /* That's it: we prom_halt() on sun4c if the cache size is anything other than 65536. | ||
61 | * So let's save some cycles and just use that everywhere except for that bootup | ||
62 | * sanity check. | ||
63 | */ | ||
64 | #define SUN4C_VAC_SIZE 65536 | ||
65 | #endif | ||
66 | |||
67 | #define SUN4C_KERNEL_BUCKETS 32 | ||
68 | |||
69 | /* Flushing the cache. */ | ||
70 | struct sun4c_vac_props sun4c_vacinfo; | ||
71 | unsigned long sun4c_kernel_faults; | ||
72 | |||
73 | /* Invalidate every sun4c cache line tag. */ | ||
74 | static void __init sun4c_flush_all(void) | ||
75 | { | ||
76 | unsigned long begin, end; | ||
77 | |||
78 | if (sun4c_vacinfo.on) | ||
79 | panic("SUN4C: AIEEE, trying to invalidate vac while it is on."); | ||
80 | |||
81 | /* Clear 'valid' bit in all cache line tags */ | ||
82 | begin = AC_CACHETAGS; | ||
83 | end = (AC_CACHETAGS + SUN4C_VAC_SIZE); | ||
84 | while (begin < end) { | ||
85 | __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : | ||
86 | "r" (begin), "i" (ASI_CONTROL)); | ||
87 | begin += sun4c_vacinfo.linesize; | ||
88 | } | ||
89 | } | ||
90 | |||
91 | static void sun4c_flush_context_hw(void) | ||
92 | { | ||
93 | unsigned long end = SUN4C_VAC_SIZE; | ||
94 | |||
95 | __asm__ __volatile__( | ||
96 | "1: addcc %0, -4096, %0\n\t" | ||
97 | " bne 1b\n\t" | ||
98 | " sta %%g0, [%0] %2" | ||
99 | : "=&r" (end) | ||
100 | : "0" (end), "i" (ASI_HWFLUSHCONTEXT) | ||
101 | : "cc"); | ||
102 | } | ||
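The countdown loop above leans on the branch delay slot: the sta executes once per iteration after the decrement, including the final fall-through pass where the counter has just hit zero. An equivalent C rendering, with sta_hwflushcontext() as a hypothetical stand-in for the sta through ASI_HWFLUSHCONTEXT:

static void flush_context_hw_c(void)
{
	long addr;

	/* One hardware flush per 4 KB step, from the top of the
	 * VAC down to and including address 0. */
	for (addr = SUN4C_VAC_SIZE - 4096; addr >= 0; addr -= 4096)
		sta_hwflushcontext(addr);
}

The _sw variants further down do the same walk line-by-line, unrolled eight cache lines per iteration, for machines without hardware flush assistance.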
103 | |||
104 | /* Must be called with IRQs disabled, at minimum. */ | ||
105 | static void sun4c_flush_segment_hw(unsigned long addr) | ||
106 | { | ||
107 | if (sun4c_get_segmap(addr) != invalid_segment) { | ||
108 | unsigned long vac_size = SUN4C_VAC_SIZE; | ||
109 | |||
110 | __asm__ __volatile__( | ||
111 | "1: addcc %0, -4096, %0\n\t" | ||
112 | " bne 1b\n\t" | ||
113 | " sta %%g0, [%2 + %0] %3" | ||
114 | : "=&r" (vac_size) | ||
115 | : "0" (vac_size), "r" (addr), "i" (ASI_HWFLUSHSEG) | ||
116 | : "cc"); | ||
117 | } | ||
118 | } | ||
119 | |||
120 | /* File local boot time fixups. */ | ||
121 | BTFIXUPDEF_CALL(void, sun4c_flush_page, unsigned long) | ||
122 | BTFIXUPDEF_CALL(void, sun4c_flush_segment, unsigned long) | ||
123 | BTFIXUPDEF_CALL(void, sun4c_flush_context, void) | ||
124 | |||
125 | #define sun4c_flush_page(addr) BTFIXUP_CALL(sun4c_flush_page)(addr) | ||
126 | #define sun4c_flush_segment(addr) BTFIXUP_CALL(sun4c_flush_segment)(addr) | ||
127 | #define sun4c_flush_context() BTFIXUP_CALL(sun4c_flush_context)() | ||
128 | |||
129 | /* Must be called with interrupts disabled, at minimum. */ | ||
130 | static void sun4c_flush_page_hw(unsigned long addr) | ||
131 | { | ||
132 | addr &= PAGE_MASK; | ||
133 | if ((int)sun4c_get_pte(addr) < 0) | ||
134 | __asm__ __volatile__("sta %%g0, [%0] %1" | ||
135 | : : "r" (addr), "i" (ASI_HWFLUSHPAGE)); | ||
136 | } | ||
137 | |||
138 | /* Don't inline the software version as it eats too many cache lines if expanded. */ | ||
139 | static void sun4c_flush_context_sw(void) | ||
140 | { | ||
141 | unsigned long nbytes = SUN4C_VAC_SIZE; | ||
142 | unsigned long lsize = sun4c_vacinfo.linesize; | ||
143 | |||
144 | __asm__ __volatile__( | ||
145 | "add %2, %2, %%g1\n\t" | ||
146 | "add %2, %%g1, %%g2\n\t" | ||
147 | "add %2, %%g2, %%g3\n\t" | ||
148 | "add %2, %%g3, %%g4\n\t" | ||
149 | "add %2, %%g4, %%g5\n\t" | ||
150 | "add %2, %%g5, %%o4\n\t" | ||
151 | "add %2, %%o4, %%o5\n" | ||
152 | "1:\n\t" | ||
153 | "subcc %0, %%o5, %0\n\t" | ||
154 | "sta %%g0, [%0] %3\n\t" | ||
155 | "sta %%g0, [%0 + %2] %3\n\t" | ||
156 | "sta %%g0, [%0 + %%g1] %3\n\t" | ||
157 | "sta %%g0, [%0 + %%g2] %3\n\t" | ||
158 | "sta %%g0, [%0 + %%g3] %3\n\t" | ||
159 | "sta %%g0, [%0 + %%g4] %3\n\t" | ||
160 | "sta %%g0, [%0 + %%g5] %3\n\t" | ||
161 | "bg 1b\n\t" | ||
162 | " sta %%g0, [%1 + %%o4] %3\n" | ||
163 | : "=&r" (nbytes) | ||
164 | : "0" (nbytes), "r" (lsize), "i" (ASI_FLUSHCTX) | ||
165 | : "g1", "g2", "g3", "g4", "g5", "o4", "o5", "cc"); | ||
166 | } | ||
167 | |||
168 | /* Don't inline the software version as it eats too many cache lines if expanded. */ | ||
169 | static void sun4c_flush_segment_sw(unsigned long addr) | ||
170 | { | ||
171 | if (sun4c_get_segmap(addr) != invalid_segment) { | ||
172 | unsigned long nbytes = SUN4C_VAC_SIZE; | ||
173 | unsigned long lsize = sun4c_vacinfo.linesize; | ||
174 | |||
175 | __asm__ __volatile__( | ||
176 | "add %2, %2, %%g1\n\t" | ||
177 | "add %2, %%g1, %%g2\n\t" | ||
178 | "add %2, %%g2, %%g3\n\t" | ||
179 | "add %2, %%g3, %%g4\n\t" | ||
180 | "add %2, %%g4, %%g5\n\t" | ||
181 | "add %2, %%g5, %%o4\n\t" | ||
182 | "add %2, %%o4, %%o5\n" | ||
183 | "1:\n\t" | ||
184 | "subcc %1, %%o5, %1\n\t" | ||
185 | "sta %%g0, [%0] %6\n\t" | ||
186 | "sta %%g0, [%0 + %2] %6\n\t" | ||
187 | "sta %%g0, [%0 + %%g1] %6\n\t" | ||
188 | "sta %%g0, [%0 + %%g2] %6\n\t" | ||
189 | "sta %%g0, [%0 + %%g3] %6\n\t" | ||
190 | "sta %%g0, [%0 + %%g4] %6\n\t" | ||
191 | "sta %%g0, [%0 + %%g5] %6\n\t" | ||
192 | "sta %%g0, [%0 + %%o4] %6\n\t" | ||
193 | "bg 1b\n\t" | ||
194 | " add %0, %%o5, %0\n" | ||
195 | : "=&r" (addr), "=&r" (nbytes), "=&r" (lsize) | ||
196 | : "0" (addr), "1" (nbytes), "2" (lsize), | ||
197 | "i" (ASI_FLUSHSEG) | ||
198 | : "g1", "g2", "g3", "g4", "g5", "o4", "o5", "cc"); | ||
199 | } | ||
200 | } | ||
201 | |||
202 | /* Don't inline the software version as it eats too many cache lines if expanded. */ | ||
203 | static void sun4c_flush_page_sw(unsigned long addr) | ||
204 | { | ||
205 | addr &= PAGE_MASK; | ||
206 | if ((sun4c_get_pte(addr) & (_SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_VALID)) == | ||
207 | _SUN4C_PAGE_VALID) { | ||
208 | unsigned long left = PAGE_SIZE; | ||
209 | unsigned long lsize = sun4c_vacinfo.linesize; | ||
210 | |||
211 | __asm__ __volatile__( | ||
212 | "add %2, %2, %%g1\n\t" | ||
213 | "add %2, %%g1, %%g2\n\t" | ||
214 | "add %2, %%g2, %%g3\n\t" | ||
215 | "add %2, %%g3, %%g4\n\t" | ||
216 | "add %2, %%g4, %%g5\n\t" | ||
217 | "add %2, %%g5, %%o4\n\t" | ||
218 | "add %2, %%o4, %%o5\n" | ||
219 | "1:\n\t" | ||
220 | "subcc %1, %%o5, %1\n\t" | ||
221 | "sta %%g0, [%0] %6\n\t" | ||
222 | "sta %%g0, [%0 + %2] %6\n\t" | ||
223 | "sta %%g0, [%0 + %%g1] %6\n\t" | ||
224 | "sta %%g0, [%0 + %%g2] %6\n\t" | ||
225 | "sta %%g0, [%0 + %%g3] %6\n\t" | ||
226 | "sta %%g0, [%0 + %%g4] %6\n\t" | ||
227 | "sta %%g0, [%0 + %%g5] %6\n\t" | ||
228 | "sta %%g0, [%0 + %%o4] %6\n\t" | ||
229 | "bg 1b\n\t" | ||
230 | " add %0, %%o5, %0\n" | ||
231 | : "=&r" (addr), "=&r" (left), "=&r" (lsize) | ||
232 | : "0" (addr), "1" (left), "2" (lsize), | ||
233 | "i" (ASI_FLUSHPG) | ||
234 | : "g1", "g2", "g3", "g4", "g5", "o4", "o5", "cc"); | ||
235 | } | ||
236 | } | ||
237 | |||
238 | /* The sun4c's do have an on-chip store buffer, and the way you | ||
239 | * clear it out isn't so obvious. The only way I can think of | ||
240 | * to accomplish this is to read the current context register, | ||
241 | * store the same value there, then read an external hardware | ||
242 | * register. | ||
243 | */ | ||
244 | void sun4c_complete_all_stores(void) | ||
245 | { | ||
246 | volatile int _unused; | ||
247 | |||
248 | _unused = sun4c_get_context(); | ||
249 | sun4c_set_context(_unused); | ||
250 | #ifdef CONFIG_SUN_AUXIO | ||
251 | _unused = get_auxio(); | ||
252 | #endif | ||
253 | } | ||
254 | |||
255 | /* Bootup utility functions. */ | ||
256 | static inline void sun4c_init_clean_segmap(unsigned char pseg) | ||
257 | { | ||
258 | unsigned long vaddr; | ||
259 | |||
260 | sun4c_put_segmap(0, pseg); | ||
261 | for (vaddr = 0; vaddr < SUN4C_REAL_PGDIR_SIZE; vaddr += PAGE_SIZE) | ||
262 | sun4c_put_pte(vaddr, 0); | ||
263 | sun4c_put_segmap(0, invalid_segment); | ||
264 | } | ||
265 | |||
266 | static inline void sun4c_init_clean_mmu(unsigned long kernel_end) | ||
267 | { | ||
268 | unsigned long vaddr; | ||
269 | unsigned char savectx, ctx; | ||
270 | |||
271 | savectx = sun4c_get_context(); | ||
272 | kernel_end = SUN4C_REAL_PGDIR_ALIGN(kernel_end); | ||
273 | for (ctx = 0; ctx < num_contexts; ctx++) { | ||
274 | sun4c_set_context(ctx); | ||
275 | for (vaddr = 0; vaddr < 0x20000000; vaddr += SUN4C_REAL_PGDIR_SIZE) | ||
276 | sun4c_put_segmap(vaddr, invalid_segment); | ||
277 | for (vaddr = 0xe0000000; vaddr < KERNBASE; vaddr += SUN4C_REAL_PGDIR_SIZE) | ||
278 | sun4c_put_segmap(vaddr, invalid_segment); | ||
279 | for (vaddr = kernel_end; vaddr < KADB_DEBUGGER_BEGVM; vaddr += SUN4C_REAL_PGDIR_SIZE) | ||
280 | sun4c_put_segmap(vaddr, invalid_segment); | ||
281 | for (vaddr = LINUX_OPPROM_ENDVM; vaddr; vaddr += SUN4C_REAL_PGDIR_SIZE) | ||
282 | sun4c_put_segmap(vaddr, invalid_segment); | ||
283 | } | ||
284 | sun4c_set_context(savectx); | ||
285 | } | ||
286 | |||
287 | void __init sun4c_probe_vac(void) | ||
288 | { | ||
289 | sun4c_disable_vac(); | ||
290 | |||
291 | if (ARCH_SUN4) { | ||
292 | switch (idprom->id_machtype) { | ||
293 | |||
294 | case (SM_SUN4|SM_4_110): | ||
295 | sun4c_vacinfo.type = VAC_NONE; | ||
296 | sun4c_vacinfo.num_bytes = 0; | ||
297 | sun4c_vacinfo.linesize = 0; | ||
298 | sun4c_vacinfo.do_hwflushes = 0; | ||
299 | prom_printf("No VAC. Get some bucks and buy a real computer."); | ||
300 | prom_halt(); | ||
301 | break; | ||
302 | |||
303 | case (SM_SUN4|SM_4_260): | ||
304 | sun4c_vacinfo.type = VAC_WRITE_BACK; | ||
305 | sun4c_vacinfo.num_bytes = 128 * 1024; | ||
306 | sun4c_vacinfo.linesize = 16; | ||
307 | sun4c_vacinfo.do_hwflushes = 0; | ||
308 | break; | ||
309 | |||
310 | case (SM_SUN4|SM_4_330): | ||
311 | sun4c_vacinfo.type = VAC_WRITE_THROUGH; | ||
312 | sun4c_vacinfo.num_bytes = 128 * 1024; | ||
313 | sun4c_vacinfo.linesize = 16; | ||
314 | sun4c_vacinfo.do_hwflushes = 0; | ||
315 | break; | ||
316 | |||
317 | case (SM_SUN4|SM_4_470): | ||
318 | sun4c_vacinfo.type = VAC_WRITE_BACK; | ||
319 | sun4c_vacinfo.num_bytes = 128 * 1024; | ||
320 | sun4c_vacinfo.linesize = 32; | ||
321 | sun4c_vacinfo.do_hwflushes = 0; | ||
322 | break; | ||
323 | |||
324 | default: | ||
325 | prom_printf("Cannot initialize VAC - weird sun4 model idprom->id_machtype = %d", idprom->id_machtype); | ||
326 | prom_halt(); | ||
327 | } | ||
328 | } else { | ||
329 | sun4c_vacinfo.type = VAC_WRITE_THROUGH; | ||
330 | |||
331 | if ((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) || | ||
332 | (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) { | ||
333 | /* PROM on SS1 lacks this info; to be super safe we | ||
334 | * hard-code it here, since this arch is cast in stone. | ||
335 | */ | ||
336 | sun4c_vacinfo.num_bytes = 65536; | ||
337 | sun4c_vacinfo.linesize = 16; | ||
338 | } else { | ||
339 | sun4c_vacinfo.num_bytes = | ||
340 | prom_getintdefault(prom_root_node, "vac-size", 65536); | ||
341 | sun4c_vacinfo.linesize = | ||
342 | prom_getintdefault(prom_root_node, "vac-linesize", 16); | ||
343 | } | ||
344 | sun4c_vacinfo.do_hwflushes = | ||
345 | prom_getintdefault(prom_root_node, "vac-hwflush", 0); | ||
346 | |||
347 | if (sun4c_vacinfo.do_hwflushes == 0) | ||
348 | sun4c_vacinfo.do_hwflushes = | ||
349 | prom_getintdefault(prom_root_node, "vac_hwflush", 0); | ||
350 | |||
351 | if (sun4c_vacinfo.num_bytes != 65536) { | ||
352 | prom_printf("WEIRD Sun4C VAC cache size, " | ||
353 | "tell sparclinux@vger.kernel.org"); | ||
354 | prom_halt(); | ||
355 | } | ||
356 | } | ||
357 | |||
358 | sun4c_vacinfo.num_lines = | ||
359 | (sun4c_vacinfo.num_bytes / sun4c_vacinfo.linesize); | ||
360 | switch (sun4c_vacinfo.linesize) { | ||
361 | case 16: | ||
362 | sun4c_vacinfo.log2lsize = 4; | ||
363 | break; | ||
364 | case 32: | ||
365 | sun4c_vacinfo.log2lsize = 5; | ||
366 | break; | ||
367 | default: | ||
368 | prom_printf("probe_vac: Didn't expect vac-linesize of %d, halting\n", | ||
369 | sun4c_vacinfo.linesize); | ||
370 | prom_halt(); | ||
371 | } | ||
372 | |||
373 | sun4c_flush_all(); | ||
374 | sun4c_enable_vac(); | ||
375 | } | ||
376 | |||
377 | /* Patch instructions for the low level kernel fault handler. */ | ||
378 | extern unsigned long invalid_segment_patch1, invalid_segment_patch1_ff; | ||
379 | extern unsigned long invalid_segment_patch2, invalid_segment_patch2_ff; | ||
380 | extern unsigned long invalid_segment_patch1_1ff, invalid_segment_patch2_1ff; | ||
381 | extern unsigned long num_context_patch1, num_context_patch1_16; | ||
382 | extern unsigned long num_context_patch2_16; | ||
383 | extern unsigned long vac_linesize_patch, vac_linesize_patch_32; | ||
384 | extern unsigned long vac_hwflush_patch1, vac_hwflush_patch1_on; | ||
385 | extern unsigned long vac_hwflush_patch2, vac_hwflush_patch2_on; | ||
386 | |||
387 | #define PATCH_INSN(src, dst) do { \ | ||
388 | daddr = &(dst); \ | ||
389 | iaddr = &(src); \ | ||
390 | *daddr = *iaddr; \ | ||
391 | } while (0) | ||
392 | |||
393 | static void __init patch_kernel_fault_handler(void) | ||
394 | { | ||
395 | unsigned long *iaddr, *daddr; | ||
396 | |||
397 | switch (num_segmaps) { | ||
398 | case 128: | ||
399 | /* Default, nothing to do. */ | ||
400 | break; | ||
401 | case 256: | ||
402 | PATCH_INSN(invalid_segment_patch1_ff, | ||
403 | invalid_segment_patch1); | ||
404 | PATCH_INSN(invalid_segment_patch2_ff, | ||
405 | invalid_segment_patch2); | ||
406 | break; | ||
407 | case 512: | ||
408 | PATCH_INSN(invalid_segment_patch1_1ff, | ||
409 | invalid_segment_patch1); | ||
410 | PATCH_INSN(invalid_segment_patch2_1ff, | ||
411 | invalid_segment_patch2); | ||
412 | break; | ||
413 | default: | ||
414 | prom_printf("Unhandled number of segmaps: %d\n", | ||
415 | num_segmaps); | ||
416 | prom_halt(); | ||
417 | } | ||
418 | switch (num_contexts) { | ||
419 | case 8: | ||
420 | /* Default, nothing to do. */ | ||
421 | break; | ||
422 | case 16: | ||
423 | PATCH_INSN(num_context_patch1_16, | ||
424 | num_context_patch1); | ||
425 | break; | ||
426 | default: | ||
427 | prom_printf("Unhandled number of contexts: %d\n", | ||
428 | num_contexts); | ||
429 | prom_halt(); | ||
430 | } | ||
431 | |||
432 | if (sun4c_vacinfo.do_hwflushes != 0) { | ||
433 | PATCH_INSN(vac_hwflush_patch1_on, vac_hwflush_patch1); | ||
434 | PATCH_INSN(vac_hwflush_patch2_on, vac_hwflush_patch2); | ||
435 | } else { | ||
436 | switch (sun4c_vacinfo.linesize) { | ||
437 | case 16: | ||
438 | /* Default, nothing to do. */ | ||
439 | break; | ||
440 | case 32: | ||
441 | PATCH_INSN(vac_linesize_patch_32, vac_linesize_patch); | ||
442 | break; | ||
443 | default: | ||
444 | prom_printf("Impossible VAC linesize %d, halting...\n", | ||
445 | sun4c_vacinfo.linesize); | ||
446 | prom_halt(); | ||
447 | } | ||
448 | } | ||
449 | } | ||
450 | |||
451 | static void __init sun4c_probe_mmu(void) | ||
452 | { | ||
453 | if (ARCH_SUN4) { | ||
454 | switch (idprom->id_machtype) { | ||
455 | case (SM_SUN4|SM_4_110): | ||
456 | prom_printf("No support for 4100 yet\n"); | ||
457 | prom_halt(); | ||
458 | num_segmaps = 256; | ||
459 | num_contexts = 8; | ||
460 | break; | ||
461 | |||
462 | case (SM_SUN4|SM_4_260): | ||
463 | /* should be 512 segmaps, when it gets fixed */ | ||
464 | num_segmaps = 256; | ||
465 | num_contexts = 16; | ||
466 | break; | ||
467 | |||
468 | case (SM_SUN4|SM_4_330): | ||
469 | num_segmaps = 256; | ||
470 | num_contexts = 16; | ||
471 | break; | ||
472 | |||
473 | case (SM_SUN4|SM_4_470): | ||
474 | /* should be 1024 segmaps, when it gets fixed */ | ||
475 | num_segmaps = 256; | ||
476 | num_contexts = 64; | ||
477 | break; | ||
478 | default: | ||
479 | prom_printf("Invalid SUN4 model\n"); | ||
480 | prom_halt(); | ||
481 | } | ||
482 | } else { | ||
483 | if ((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) || | ||
484 | (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) { | ||
485 | /* Hardcode these just to be safe; PROM on SS1 does | ||
486 | * not have this info available in the root node. | ||
487 | */ | ||
488 | num_segmaps = 128; | ||
489 | num_contexts = 8; | ||
490 | } else { | ||
491 | num_segmaps = | ||
492 | prom_getintdefault(prom_root_node, "mmu-npmg", 128); | ||
493 | num_contexts = | ||
494 | prom_getintdefault(prom_root_node, "mmu-nctx", 0x8); | ||
495 | } | ||
496 | } | ||
497 | patch_kernel_fault_handler(); | ||
498 | } | ||
499 | |||
500 | volatile unsigned long *sun4c_memerr_reg = NULL; | ||
501 | |||
502 | void __init sun4c_probe_memerr_reg(void) | ||
503 | { | ||
504 | int node; | ||
505 | struct linux_prom_registers regs[1]; | ||
506 | |||
507 | if (ARCH_SUN4) { | ||
508 | sun4c_memerr_reg = ioremap(sun4_memreg_physaddr, PAGE_SIZE); | ||
509 | } else { | ||
510 | node = prom_getchild(prom_root_node); | ||
511 | node = prom_searchsiblings(node, "memory-error"); | ||
512 | if (!node) | ||
513 | return; | ||
514 | if (prom_getproperty(node, "reg", (char *)regs, sizeof(regs)) <= 0) | ||
515 | return; | ||
516 | /* hmm, I think regs[0].which_io is zero here anyway */ | ||
517 | sun4c_memerr_reg = ioremap(regs[0].phys_addr, regs[0].reg_size); | ||
518 | } | ||
519 | } | ||
520 | |||
521 | static inline void sun4c_init_ss2_cache_bug(void) | ||
522 | { | ||
523 | extern unsigned long start; | ||
524 | |||
525 | if ((idprom->id_machtype == (SM_SUN4C | SM_4C_SS2)) || | ||
526 | (idprom->id_machtype == (SM_SUN4C | SM_4C_IPX)) || | ||
527 | (idprom->id_machtype == (SM_SUN4 | SM_4_330)) || | ||
528 | (idprom->id_machtype == (SM_SUN4C | SM_4C_ELC))) { | ||
529 | /* Whee.. */ | ||
530 | printk("SS2 cache bug detected, uncaching trap table page\n"); | ||
531 | sun4c_flush_page((unsigned int) &start); | ||
532 | sun4c_put_pte(((unsigned long) &start), | ||
533 | (sun4c_get_pte((unsigned long) &start) | _SUN4C_PAGE_NOCACHE)); | ||
534 | } | ||
535 | } | ||
536 | |||
537 | /* Addr is always aligned on a page boundary for us already. */ | ||
538 | static int sun4c_map_dma_area(dma_addr_t *pba, unsigned long va, | ||
539 | unsigned long addr, int len) | ||
540 | { | ||
541 | unsigned long page, end; | ||
542 | |||
543 | *pba = addr; | ||
544 | |||
545 | end = PAGE_ALIGN((addr + len)); | ||
546 | while (addr < end) { | ||
547 | page = va; | ||
548 | sun4c_flush_page(page); | ||
549 | page -= PAGE_OFFSET; | ||
550 | page >>= PAGE_SHIFT; | ||
551 | page |= (_SUN4C_PAGE_VALID | _SUN4C_PAGE_DIRTY | | ||
552 | _SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_PRIV); | ||
553 | sun4c_put_pte(addr, page); | ||
554 | addr += PAGE_SIZE; | ||
555 | va += PAGE_SIZE; | ||
556 | } | ||
557 | |||
558 | return 0; | ||
559 | } | ||
560 | |||
561 | static struct page *sun4c_translate_dvma(unsigned long busa) | ||
562 | { | ||
563 | /* Fortunately for us, bus_addr == uncached_virt in sun4c. */ | ||
564 | unsigned long pte = sun4c_get_pte(busa); | ||
565 | return pfn_to_page(pte & SUN4C_PFN_MASK); | ||
566 | } | ||
567 | |||
568 | static void sun4c_unmap_dma_area(unsigned long busa, int len) | ||
569 | { | ||
570 | /* Fortunately for us, bus_addr == uncached_virt in sun4c. */ | ||
571 | /* XXX Implement this */ | ||
572 | } | ||
573 | |||
574 | /* TLB management. */ | ||
575 | |||
576 | /* Don't change this struct without changing entry.S. This is used | ||
577 | * in the in-window kernel fault handler, and you don't want to mess | ||
578 | * with that. (See sun4c_fault in entry.S). | ||
579 | */ | ||
580 | struct sun4c_mmu_entry { | ||
581 | struct sun4c_mmu_entry *next; | ||
582 | struct sun4c_mmu_entry *prev; | ||
583 | unsigned long vaddr; | ||
584 | unsigned char pseg; | ||
585 | unsigned char locked; | ||
586 | |||
587 | /* For user mappings only, and completely hidden from kernel | ||
588 | * TLB miss code. | ||
589 | */ | ||
590 | unsigned char ctx; | ||
591 | struct sun4c_mmu_entry *lru_next; | ||
592 | struct sun4c_mmu_entry *lru_prev; | ||
593 | }; | ||
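/* Each entry above sits on two independent circular doubly-linked
 * lists at once: a per-context (or kernel) ring through next/prev,
 * and, for user entries only, a global LRU through lru_next/lru_prev.
 * That way an entry can be located either by owning context or by
 * recency without scanning the whole pool.
 */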
594 | |||
595 | static struct sun4c_mmu_entry mmu_entry_pool[SUN4C_MAX_SEGMAPS]; | ||
596 | |||
597 | static void __init sun4c_init_mmu_entry_pool(void) | ||
598 | { | ||
599 | int i; | ||
600 | |||
601 | for (i = 0; i < SUN4C_MAX_SEGMAPS; i++) { | ||
602 | mmu_entry_pool[i].pseg = i; | ||
603 | mmu_entry_pool[i].next = NULL; | ||
604 | mmu_entry_pool[i].prev = NULL; | ||
605 | mmu_entry_pool[i].vaddr = 0; | ||
606 | mmu_entry_pool[i].locked = 0; | ||
607 | mmu_entry_pool[i].ctx = 0; | ||
608 | mmu_entry_pool[i].lru_next = NULL; | ||
609 | mmu_entry_pool[i].lru_prev = NULL; | ||
610 | } | ||
611 | mmu_entry_pool[invalid_segment].locked = 1; | ||
612 | } | ||
613 | |||
614 | static inline void fix_permissions(unsigned long vaddr, unsigned long bits_on, | ||
615 | unsigned long bits_off) | ||
616 | { | ||
617 | unsigned long start, end; | ||
618 | |||
619 | end = vaddr + SUN4C_REAL_PGDIR_SIZE; | ||
620 | for (start = vaddr; start < end; start += PAGE_SIZE) | ||
621 | if (sun4c_get_pte(start) & _SUN4C_PAGE_VALID) | ||
622 | sun4c_put_pte(start, (sun4c_get_pte(start) | bits_on) & | ||
623 | ~bits_off); | ||
624 | } | ||
625 | |||
626 | static inline void sun4c_init_map_kernelprom(unsigned long kernel_end) | ||
627 | { | ||
628 | unsigned long vaddr; | ||
629 | unsigned char pseg, ctx; | ||
630 | #ifdef CONFIG_SUN4 | ||
631 | /* sun4/110 and 260 have no kadb. */ | ||
632 | if ((idprom->id_machtype != (SM_SUN4 | SM_4_260)) && | ||
633 | (idprom->id_machtype != (SM_SUN4 | SM_4_110))) { | ||
634 | #endif | ||
635 | for (vaddr = KADB_DEBUGGER_BEGVM; | ||
636 | vaddr < LINUX_OPPROM_ENDVM; | ||
637 | vaddr += SUN4C_REAL_PGDIR_SIZE) { | ||
638 | pseg = sun4c_get_segmap(vaddr); | ||
639 | if (pseg != invalid_segment) { | ||
640 | mmu_entry_pool[pseg].locked = 1; | ||
641 | for (ctx = 0; ctx < num_contexts; ctx++) | ||
642 | prom_putsegment(ctx, vaddr, pseg); | ||
643 | fix_permissions(vaddr, _SUN4C_PAGE_PRIV, 0); | ||
644 | } | ||
645 | } | ||
646 | #ifdef CONFIG_SUN4 | ||
647 | } | ||
648 | #endif | ||
649 | for (vaddr = KERNBASE; vaddr < kernel_end; vaddr += SUN4C_REAL_PGDIR_SIZE) { | ||
650 | pseg = sun4c_get_segmap(vaddr); | ||
651 | mmu_entry_pool[pseg].locked = 1; | ||
652 | for (ctx = 0; ctx < num_contexts; ctx++) | ||
653 | prom_putsegment(ctx, vaddr, pseg); | ||
654 | fix_permissions(vaddr, _SUN4C_PAGE_PRIV, _SUN4C_PAGE_NOCACHE); | ||
655 | } | ||
656 | } | ||
657 | |||
658 | static void __init sun4c_init_lock_area(unsigned long start, unsigned long end) | ||
659 | { | ||
660 | int i, ctx; | ||
661 | |||
662 | while (start < end) { | ||
663 | for (i = 0; i < invalid_segment; i++) | ||
664 | if (!mmu_entry_pool[i].locked) | ||
665 | break; | ||
666 | mmu_entry_pool[i].locked = 1; | ||
667 | sun4c_init_clean_segmap(i); | ||
668 | for (ctx = 0; ctx < num_contexts; ctx++) | ||
669 | prom_putsegment(ctx, start, mmu_entry_pool[i].pseg); | ||
670 | start += SUN4C_REAL_PGDIR_SIZE; | ||
671 | } | ||
672 | } | ||
673 | |||
674 | /* Don't change this struct without changing entry.S. This is used | ||
675 | * in the in-window kernel fault handler, and you don't want to mess | ||
676 | * with that. (See sun4c_fault in entry.S). | ||
677 | */ | ||
678 | struct sun4c_mmu_ring { | ||
679 | struct sun4c_mmu_entry ringhd; | ||
680 | int num_entries; | ||
681 | }; | ||
682 | |||
683 | static struct sun4c_mmu_ring sun4c_context_ring[SUN4C_MAX_CONTEXTS]; /* used user entries */ | ||
684 | static struct sun4c_mmu_ring sun4c_ufree_ring; /* free user entries */ | ||
685 | static struct sun4c_mmu_ring sun4c_ulru_ring; /* LRU user entries */ | ||
686 | struct sun4c_mmu_ring sun4c_kernel_ring; /* used kernel entries */ | ||
687 | struct sun4c_mmu_ring sun4c_kfree_ring; /* free kernel entries */ | ||
688 | |||
689 | static inline void sun4c_init_rings(void) | ||
690 | { | ||
691 | int i; | ||
692 | |||
693 | for (i = 0; i < SUN4C_MAX_CONTEXTS; i++) { | ||
694 | sun4c_context_ring[i].ringhd.next = | ||
695 | sun4c_context_ring[i].ringhd.prev = | ||
696 | &sun4c_context_ring[i].ringhd; | ||
697 | sun4c_context_ring[i].num_entries = 0; | ||
698 | } | ||
699 | sun4c_ufree_ring.ringhd.next = sun4c_ufree_ring.ringhd.prev = | ||
700 | &sun4c_ufree_ring.ringhd; | ||
701 | sun4c_ufree_ring.num_entries = 0; | ||
702 | sun4c_ulru_ring.ringhd.lru_next = sun4c_ulru_ring.ringhd.lru_prev = | ||
703 | &sun4c_ulru_ring.ringhd; | ||
704 | sun4c_ulru_ring.num_entries = 0; | ||
705 | sun4c_kernel_ring.ringhd.next = sun4c_kernel_ring.ringhd.prev = | ||
706 | &sun4c_kernel_ring.ringhd; | ||
707 | sun4c_kernel_ring.num_entries = 0; | ||
708 | sun4c_kfree_ring.ringhd.next = sun4c_kfree_ring.ringhd.prev = | ||
709 | &sun4c_kfree_ring.ringhd; | ||
710 | sun4c_kfree_ring.num_entries = 0; | ||
711 | } | ||
712 | |||
713 | static void add_ring(struct sun4c_mmu_ring *ring, | ||
714 | struct sun4c_mmu_entry *entry) | ||
715 | { | ||
716 | struct sun4c_mmu_entry *head = &ring->ringhd; | ||
717 | |||
718 | entry->prev = head; | ||
719 | (entry->next = head->next)->prev = entry; | ||
720 | head->next = entry; | ||
721 | ring->num_entries++; | ||
722 | } | ||
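/* The assignment-in-expression in add_ring() (and in add_lru() and
 * add_ring_ordered() below) is the usual circular-list splice; written
 * out long-hand it reads:
 *
 *	entry->prev = head;
 *	entry->next = head->next;
 *	entry->next->prev = entry;
 *	head->next = entry;
 *
 * i.e. the entry is inserted immediately after the ring head.
 */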
723 | |||
724 | static __inline__ void add_lru(struct sun4c_mmu_entry *entry) | ||
725 | { | ||
726 | struct sun4c_mmu_ring *ring = &sun4c_ulru_ring; | ||
727 | struct sun4c_mmu_entry *head = &ring->ringhd; | ||
728 | |||
729 | entry->lru_next = head; | ||
730 | (entry->lru_prev = head->lru_prev)->lru_next = entry; | ||
731 | head->lru_prev = entry; | ||
732 | } | ||
733 | |||
734 | static void add_ring_ordered(struct sun4c_mmu_ring *ring, | ||
735 | struct sun4c_mmu_entry *entry) | ||
736 | { | ||
737 | struct sun4c_mmu_entry *head = &ring->ringhd; | ||
738 | unsigned long addr = entry->vaddr; | ||
739 | |||
740 | while ((head->next != &ring->ringhd) && (head->next->vaddr < addr)) | ||
741 | head = head->next; | ||
742 | |||
743 | entry->prev = head; | ||
744 | (entry->next = head->next)->prev = entry; | ||
745 | head->next = entry; | ||
746 | ring->num_entries++; | ||
747 | |||
748 | add_lru(entry); | ||
749 | } | ||
750 | |||
751 | static __inline__ void remove_ring(struct sun4c_mmu_ring *ring, | ||
752 | struct sun4c_mmu_entry *entry) | ||
753 | { | ||
754 | struct sun4c_mmu_entry *next = entry->next; | ||
755 | |||
756 | (next->prev = entry->prev)->next = next; | ||
757 | ring->num_entries--; | ||
758 | } | ||
759 | |||
760 | static void remove_lru(struct sun4c_mmu_entry *entry) | ||
761 | { | ||
762 | struct sun4c_mmu_entry *next = entry->lru_next; | ||
763 | |||
764 | (next->lru_prev = entry->lru_prev)->lru_next = next; | ||
765 | } | ||
766 | |||
767 | static void free_user_entry(int ctx, struct sun4c_mmu_entry *entry) | ||
768 | { | ||
769 | remove_ring(sun4c_context_ring+ctx, entry); | ||
770 | remove_lru(entry); | ||
771 | add_ring(&sun4c_ufree_ring, entry); | ||
772 | } | ||
773 | |||
774 | static void free_kernel_entry(struct sun4c_mmu_entry *entry, | ||
775 | struct sun4c_mmu_ring *ring) | ||
776 | { | ||
777 | remove_ring(ring, entry); | ||
778 | add_ring(&sun4c_kfree_ring, entry); | ||
779 | } | ||
780 | |||
781 | static void __init sun4c_init_fill_kernel_ring(int howmany) | ||
782 | { | ||
783 | int i; | ||
784 | |||
785 | while (howmany) { | ||
786 | for (i = 0; i < invalid_segment; i++) | ||
787 | if (!mmu_entry_pool[i].locked) | ||
788 | break; | ||
789 | mmu_entry_pool[i].locked = 1; | ||
790 | sun4c_init_clean_segmap(i); | ||
791 | add_ring(&sun4c_kfree_ring, &mmu_entry_pool[i]); | ||
792 | howmany--; | ||
793 | } | ||
794 | } | ||
795 | |||
796 | static void __init sun4c_init_fill_user_ring(void) | ||
797 | { | ||
798 | int i; | ||
799 | |||
800 | for (i = 0; i < invalid_segment; i++) { | ||
801 | if (mmu_entry_pool[i].locked) | ||
802 | continue; | ||
803 | sun4c_init_clean_segmap(i); | ||
804 | add_ring(&sun4c_ufree_ring, &mmu_entry_pool[i]); | ||
805 | } | ||
806 | } | ||
807 | |||
808 | static void sun4c_kernel_unmap(struct sun4c_mmu_entry *kentry) | ||
809 | { | ||
810 | int savectx, ctx; | ||
811 | |||
812 | savectx = sun4c_get_context(); | ||
813 | for (ctx = 0; ctx < num_contexts; ctx++) { | ||
814 | sun4c_set_context(ctx); | ||
815 | sun4c_put_segmap(kentry->vaddr, invalid_segment); | ||
816 | } | ||
817 | sun4c_set_context(savectx); | ||
818 | } | ||
819 | |||
820 | static void sun4c_kernel_map(struct sun4c_mmu_entry *kentry) | ||
821 | { | ||
822 | int savectx, ctx; | ||
823 | |||
824 | savectx = sun4c_get_context(); | ||
825 | for (ctx = 0; ctx < num_contexts; ctx++) { | ||
826 | sun4c_set_context(ctx); | ||
827 | sun4c_put_segmap(kentry->vaddr, kentry->pseg); | ||
828 | } | ||
829 | sun4c_set_context(savectx); | ||
830 | } | ||
831 | |||
832 | #define sun4c_user_unmap(__entry) \ | ||
833 | sun4c_put_segmap((__entry)->vaddr, invalid_segment) | ||
834 | |||
835 | static void sun4c_demap_context(struct sun4c_mmu_ring *crp, unsigned char ctx) | ||
836 | { | ||
837 | struct sun4c_mmu_entry *head = &crp->ringhd; | ||
838 | unsigned long flags; | ||
839 | |||
840 | local_irq_save(flags); | ||
841 | if (head->next != head) { | ||
842 | struct sun4c_mmu_entry *entry = head->next; | ||
843 | int savectx = sun4c_get_context(); | ||
844 | |||
845 | flush_user_windows(); | ||
846 | sun4c_set_context(ctx); | ||
847 | sun4c_flush_context(); | ||
848 | do { | ||
849 | struct sun4c_mmu_entry *next = entry->next; | ||
850 | |||
851 | sun4c_user_unmap(entry); | ||
852 | free_user_entry(ctx, entry); | ||
853 | |||
854 | entry = next; | ||
855 | } while (entry != head); | ||
856 | sun4c_set_context(savectx); | ||
857 | } | ||
858 | local_irq_restore(flags); | ||
859 | } | ||
860 | |||
861 | static int sun4c_user_taken_entries; /* This is how much we have. */ | ||
862 | static int max_user_taken_entries; /* This limits us and prevents deadlock. */ | ||
863 | |||
864 | static struct sun4c_mmu_entry *sun4c_kernel_strategy(void) | ||
865 | { | ||
866 | struct sun4c_mmu_entry *this_entry; | ||
867 | |||
868 | /* If some are free, return first one. */ | ||
869 | if (sun4c_kfree_ring.num_entries) { | ||
870 | this_entry = sun4c_kfree_ring.ringhd.next; | ||
871 | return this_entry; | ||
872 | } | ||
873 | |||
874 | /* Else free one up. */ | ||
875 | this_entry = sun4c_kernel_ring.ringhd.prev; | ||
876 | sun4c_flush_segment(this_entry->vaddr); | ||
877 | sun4c_kernel_unmap(this_entry); | ||
878 | free_kernel_entry(this_entry, &sun4c_kernel_ring); | ||
879 | this_entry = sun4c_kfree_ring.ringhd.next; | ||
880 | |||
881 | return this_entry; | ||
882 | } | ||
883 | |||
884 | /* Using this method to free up mmu entries eliminates a lot of | ||
885 | * potential races, since the kernel itself can incur tlb | ||
886 | * replacement faults. There may be performance penalties. | ||
887 | * | ||
888 | * NOTE: Must be called with interrupts disabled. | ||
889 | */ | ||
890 | static struct sun4c_mmu_entry *sun4c_user_strategy(void) | ||
891 | { | ||
892 | struct sun4c_mmu_entry *entry; | ||
893 | unsigned char ctx; | ||
894 | int savectx; | ||
895 | |||
896 | /* If some are free, return first one. */ | ||
897 | if (sun4c_ufree_ring.num_entries) { | ||
898 | entry = sun4c_ufree_ring.ringhd.next; | ||
899 | goto unlink_out; | ||
900 | } | ||
901 | |||
902 | if (sun4c_user_taken_entries) { | ||
903 | entry = sun4c_kernel_strategy(); | ||
904 | sun4c_user_taken_entries--; | ||
905 | goto kunlink_out; | ||
906 | } | ||
907 | |||
908 | /* Grab from the beginning of the LRU list. */ | ||
909 | entry = sun4c_ulru_ring.ringhd.lru_next; | ||
910 | ctx = entry->ctx; | ||
911 | |||
912 | savectx = sun4c_get_context(); | ||
913 | flush_user_windows(); | ||
914 | sun4c_set_context(ctx); | ||
915 | sun4c_flush_segment(entry->vaddr); | ||
916 | sun4c_user_unmap(entry); | ||
917 | remove_ring(sun4c_context_ring + ctx, entry); | ||
918 | remove_lru(entry); | ||
919 | sun4c_set_context(savectx); | ||
920 | |||
921 | return entry; | ||
922 | |||
923 | unlink_out: | ||
924 | remove_ring(&sun4c_ufree_ring, entry); | ||
925 | return entry; | ||
926 | kunlink_out: | ||
927 | remove_ring(&sun4c_kfree_ring, entry); | ||
928 | return entry; | ||
929 | } | ||
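/* To summarize the three sources tried above, in order: (1) a free
 * entry from sun4c_ufree_ring; (2) an entry previously lent to the
 * kernel (sun4c_user_taken_entries counts these loans), recovered via
 * sun4c_kernel_strategy(); (3) failing both, the least-recently-used
 * user entry, which must first be flushed and unmapped in its owning
 * context.
 */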
930 | |||
931 | /* NOTE: Must be called with interrupts disabled. */ | ||
932 | void sun4c_grow_kernel_ring(void) | ||
933 | { | ||
934 | struct sun4c_mmu_entry *entry; | ||
935 | |||
936 | /* Prevent deadlock condition. */ | ||
937 | if (sun4c_user_taken_entries >= max_user_taken_entries) | ||
938 | return; | ||
939 | |||
940 | if (sun4c_ufree_ring.num_entries) { | ||
941 | entry = sun4c_ufree_ring.ringhd.next; | ||
942 | remove_ring(&sun4c_ufree_ring, entry); | ||
943 | add_ring(&sun4c_kfree_ring, entry); | ||
944 | sun4c_user_taken_entries++; | ||
945 | } | ||
946 | } | ||
947 | |||
948 | /* 2 page buckets for task struct and kernel stack allocation. | ||
949 | * | ||
950 | * TASK_STACK_BEGIN | ||
951 | * bucket[0] | ||
952 | * bucket[1] | ||
953 | * [ ... ] | ||
954 | * bucket[NR_TASK_BUCKETS-1] | ||
955 | * TASK_STACK_BEGIN + (sizeof(struct task_bucket) * NR_TASK_BUCKETS) | ||
956 | * | ||
957 | * Each slot looks like: | ||
958 | * | ||
959 | * page 1 -- task struct + beginning of kernel stack | ||
960 | * page 2 -- rest of kernel stack | ||
961 | */ | ||
962 | |||
963 | union task_union *sun4c_bucket[NR_TASK_BUCKETS]; | ||
964 | |||
965 | static int sun4c_lowbucket_avail; | ||
966 | |||
967 | #define BUCKET_EMPTY ((union task_union *) 0) | ||
968 | #define BUCKET_SHIFT (PAGE_SHIFT + 1) /* log2(sizeof(struct task_bucket)) */ | ||
969 | #define BUCKET_SIZE (1 << BUCKET_SHIFT) | ||
970 | #define BUCKET_NUM(addr) ((((addr) - SUN4C_LOCK_VADDR) >> BUCKET_SHIFT)) | ||
971 | #define BUCKET_ADDR(num) (((num) << BUCKET_SHIFT) + SUN4C_LOCK_VADDR) | ||
972 | #define BUCKET_PTE(page) \ | ||
973 | ((((page) - PAGE_OFFSET) >> PAGE_SHIFT) | pgprot_val(SUN4C_PAGE_KERNEL)) | ||
974 | #define BUCKET_PTE_PAGE(pte) \ | ||
975 | (PAGE_OFFSET + (((pte) & SUN4C_PFN_MASK) << PAGE_SHIFT)) | ||
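/* A worked example of the bucket arithmetic, assuming the usual 4K
 * sun4c page size (PAGE_SHIFT == 12, hence BUCKET_SHIFT == 13 and
 * BUCKET_SIZE == 8K, one two-page bucket):
 *
 *	BUCKET_ADDR(3) == SUN4C_LOCK_VADDR + (3 << 13)
 *	               == SUN4C_LOCK_VADDR + 0x6000
 *	BUCKET_NUM(SUN4C_LOCK_VADDR + 0x6000) == 3
 *
 * BUCKET_PTE() and BUCKET_PTE_PAGE() similarly convert between the
 * kernel virtual address of a bucket's backing pages and the raw
 * sun4c PTE that maps them.
 */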
976 | |||
977 | static void get_locked_segment(unsigned long addr) | ||
978 | { | ||
979 | struct sun4c_mmu_entry *stolen; | ||
980 | unsigned long flags; | ||
981 | |||
982 | local_irq_save(flags); | ||
983 | addr &= SUN4C_REAL_PGDIR_MASK; | ||
984 | stolen = sun4c_user_strategy(); | ||
985 | max_user_taken_entries--; | ||
986 | stolen->vaddr = addr; | ||
987 | flush_user_windows(); | ||
988 | sun4c_kernel_map(stolen); | ||
989 | local_irq_restore(flags); | ||
990 | } | ||
991 | |||
992 | static void free_locked_segment(unsigned long addr) | ||
993 | { | ||
994 | struct sun4c_mmu_entry *entry; | ||
995 | unsigned long flags; | ||
996 | unsigned char pseg; | ||
997 | |||
998 | local_irq_save(flags); | ||
999 | addr &= SUN4C_REAL_PGDIR_MASK; | ||
1000 | pseg = sun4c_get_segmap(addr); | ||
1001 | entry = &mmu_entry_pool[pseg]; | ||
1002 | |||
1003 | flush_user_windows(); | ||
1004 | sun4c_flush_segment(addr); | ||
1005 | sun4c_kernel_unmap(entry); | ||
1006 | add_ring(&sun4c_ufree_ring, entry); | ||
1007 | max_user_taken_entries++; | ||
1008 | local_irq_restore(flags); | ||
1009 | } | ||
1010 | |||
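/* On the "32 buckets per segment" arithmetic below: one sun4c pseg
 * maps SUN4C_REAL_PGDIR_SIZE == 256K and one bucket is BUCKET_SIZE ==
 * 8K (again assuming 4K pages), so 256K / 8K == 32 buckets share each
 * locked segment, and "entry &= ~31" rounds a bucket index down to
 * the first bucket of its segment.
 */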
1011 | static inline void garbage_collect(int entry) | ||
1012 | { | ||
1013 | int start, end; | ||
1014 | |||
1015 | /* 32 buckets per segment... */ | ||
1016 | entry &= ~31; | ||
1017 | start = entry; | ||
1018 | for (end = (start + 32); start < end; start++) | ||
1019 | if (sun4c_bucket[start] != BUCKET_EMPTY) | ||
1020 | return; | ||
1021 | |||
1022 | /* Entire segment empty, release it. */ | ||
1023 | free_locked_segment(BUCKET_ADDR(entry)); | ||
1024 | } | ||
1025 | |||
1026 | static struct thread_info *sun4c_alloc_thread_info(void) | ||
1027 | { | ||
1028 | unsigned long addr, pages; | ||
1029 | int entry; | ||
1030 | |||
1031 | pages = __get_free_pages(GFP_KERNEL, THREAD_INFO_ORDER); | ||
1032 | if (!pages) | ||
1033 | return NULL; | ||
1034 | |||
1035 | for (entry = sun4c_lowbucket_avail; entry < NR_TASK_BUCKETS; entry++) | ||
1036 | if (sun4c_bucket[entry] == BUCKET_EMPTY) | ||
1037 | break; | ||
1038 | if (entry == NR_TASK_BUCKETS) { | ||
1039 | free_pages(pages, THREAD_INFO_ORDER); | ||
1040 | return NULL; | ||
1041 | } | ||
1042 | if (entry >= sun4c_lowbucket_avail) | ||
1043 | sun4c_lowbucket_avail = entry + 1; | ||
1044 | |||
1045 | addr = BUCKET_ADDR(entry); | ||
1046 | sun4c_bucket[entry] = (union task_union *) addr; | ||
1047 | if (sun4c_get_segmap(addr) == invalid_segment) | ||
1048 | get_locked_segment(addr); | ||
1049 | |||
1050 | /* We are changing the virtual color of the page(s) | ||
1051 | * so we must flush the cache to guarantee consistency. | ||
1052 | */ | ||
1053 | sun4c_flush_page(pages); | ||
1054 | #ifndef CONFIG_SUN4 | ||
1055 | sun4c_flush_page(pages + PAGE_SIZE); | ||
1056 | #endif | ||
1057 | |||
1058 | sun4c_put_pte(addr, BUCKET_PTE(pages)); | ||
1059 | #ifndef CONFIG_SUN4 | ||
1060 | sun4c_put_pte(addr + PAGE_SIZE, BUCKET_PTE(pages + PAGE_SIZE)); | ||
1061 | #endif | ||
1062 | |||
1063 | #ifdef CONFIG_DEBUG_STACK_USAGE | ||
1064 | memset((void *)addr, 0, PAGE_SIZE << THREAD_INFO_ORDER); | ||
1065 | #endif /* CONFIG_DEBUG_STACK_USAGE */ | ||
1066 | |||
1067 | return (struct thread_info *) addr; | ||
1068 | } | ||
1069 | |||
1070 | static void sun4c_free_thread_info(struct thread_info *ti) | ||
1071 | { | ||
1072 | unsigned long tiaddr = (unsigned long) ti; | ||
1073 | unsigned long pages = BUCKET_PTE_PAGE(sun4c_get_pte(tiaddr)); | ||
1074 | int entry = BUCKET_NUM(tiaddr); | ||
1075 | |||
1076 | /* We are deleting a mapping, so the flush here is mandatory. */ | ||
1077 | sun4c_flush_page(tiaddr); | ||
1078 | #ifndef CONFIG_SUN4 | ||
1079 | sun4c_flush_page(tiaddr + PAGE_SIZE); | ||
1080 | #endif | ||
1081 | sun4c_put_pte(tiaddr, 0); | ||
1082 | #ifndef CONFIG_SUN4 | ||
1083 | sun4c_put_pte(tiaddr + PAGE_SIZE, 0); | ||
1084 | #endif | ||
1085 | sun4c_bucket[entry] = BUCKET_EMPTY; | ||
1086 | if (entry < sun4c_lowbucket_avail) | ||
1087 | sun4c_lowbucket_avail = entry; | ||
1088 | |||
1089 | free_pages(pages, THREAD_INFO_ORDER); | ||
1090 | garbage_collect(entry); | ||
1091 | } | ||
1092 | |||
1093 | static void __init sun4c_init_buckets(void) | ||
1094 | { | ||
1095 | int entry; | ||
1096 | |||
1097 | if (sizeof(union thread_union) != (PAGE_SIZE << THREAD_INFO_ORDER)) { | ||
1098 | extern void thread_info_size_is_bolixed_pete(void); | ||
1099 | thread_info_size_is_bolixed_pete(); | ||
1100 | } | ||
1101 | |||
1102 | for (entry = 0; entry < NR_TASK_BUCKETS; entry++) | ||
1103 | sun4c_bucket[entry] = BUCKET_EMPTY; | ||
1104 | sun4c_lowbucket_avail = 0; | ||
1105 | } | ||
1106 | |||
1107 | static unsigned long sun4c_iobuffer_start; | ||
1108 | static unsigned long sun4c_iobuffer_end; | ||
1109 | static unsigned long sun4c_iobuffer_high; | ||
1110 | static unsigned long *sun4c_iobuffer_map; | ||
1111 | static int iobuffer_map_size; | ||
1112 | |||
1113 | /* | ||
1114 | * Alias our pages so they do not cause a trap. | ||
1115 | * Also, one page may be aliased into several I/O areas, and those | ||
1116 | * I/Os may finish separately. | ||
1117 | */ | ||
1118 | static char *sun4c_lockarea(char *vaddr, unsigned long size) | ||
1119 | { | ||
1120 | unsigned long base, scan; | ||
1121 | unsigned long npages; | ||
1122 | unsigned long vpage; | ||
1123 | unsigned long pte; | ||
1124 | unsigned long apage; | ||
1125 | unsigned long high; | ||
1126 | unsigned long flags; | ||
1127 | |||
1128 | npages = (((unsigned long)vaddr & ~PAGE_MASK) + | ||
1129 | size + (PAGE_SIZE-1)) >> PAGE_SHIFT; | ||
1130 | |||
1131 | scan = 0; | ||
1132 | local_irq_save(flags); | ||
1133 | for (;;) { | ||
1134 | scan = find_next_zero_bit(sun4c_iobuffer_map, | ||
1135 | iobuffer_map_size, scan); | ||
1136 | if ((base = scan) + npages > iobuffer_map_size) goto abend; | ||
1137 | for (;;) { | ||
1138 | if (scan >= base + npages) goto found; | ||
1139 | if (test_bit(scan, sun4c_iobuffer_map)) break; | ||
1140 | scan++; | ||
1141 | } | ||
1142 | } | ||
1143 | |||
1144 | found: | ||
1145 | high = ((base + npages) << PAGE_SHIFT) + sun4c_iobuffer_start; | ||
1146 | high = SUN4C_REAL_PGDIR_ALIGN(high); | ||
1147 | while (high > sun4c_iobuffer_high) { | ||
1148 | get_locked_segment(sun4c_iobuffer_high); | ||
1149 | sun4c_iobuffer_high += SUN4C_REAL_PGDIR_SIZE; | ||
1150 | } | ||
1151 | |||
1152 | vpage = ((unsigned long) vaddr) & PAGE_MASK; | ||
1153 | for (scan = base; scan < base+npages; scan++) { | ||
1154 | pte = ((vpage-PAGE_OFFSET) >> PAGE_SHIFT); | ||
1155 | pte |= pgprot_val(SUN4C_PAGE_KERNEL); | ||
1156 | pte |= _SUN4C_PAGE_NOCACHE; | ||
1157 | set_bit(scan, sun4c_iobuffer_map); | ||
1158 | apage = (scan << PAGE_SHIFT) + sun4c_iobuffer_start; | ||
1159 | |||
1160 | /* Flush original mapping so we see the right things later. */ | ||
1161 | sun4c_flush_page(vpage); | ||
1162 | |||
1163 | sun4c_put_pte(apage, pte); | ||
1164 | vpage += PAGE_SIZE; | ||
1165 | } | ||
1166 | local_irq_restore(flags); | ||
1167 | return (char *) ((base << PAGE_SHIFT) + sun4c_iobuffer_start + | ||
1168 | (((unsigned long) vaddr) & ~PAGE_MASK)); | ||
1169 | |||
1170 | abend: | ||
1171 | local_irq_restore(flags); | ||
1172 | printk("DMA vaddr=0x%p size=%08lx\n", vaddr, size); | ||
1173 | panic("Out of iobuffer table"); | ||
1174 | return NULL; | ||
1175 | } | ||
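/* sun4c_lockarea() above is a simple first-fit allocator over the
 * sun4c_iobuffer_map bitmap, one bit per page of alias space.  The
 * alias PTE gets _SUN4C_PAGE_NOCACHE and the original mapping is
 * flushed first because the sun4c VAC is virtually indexed: two live
 * cacheable virtual mappings of one physical page could otherwise
 * hold stale copies of the same data.
 */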
1176 | |||
1177 | static void sun4c_unlockarea(char *vaddr, unsigned long size) | ||
1178 | { | ||
1179 | unsigned long vpage, npages; | ||
1180 | unsigned long flags; | ||
1181 | int scan, high; | ||
1182 | |||
1183 | vpage = (unsigned long)vaddr & PAGE_MASK; | ||
1184 | npages = (((unsigned long)vaddr & ~PAGE_MASK) + | ||
1185 | size + (PAGE_SIZE-1)) >> PAGE_SHIFT; | ||
1186 | |||
1187 | local_irq_save(flags); | ||
1188 | while (npages != 0) { | ||
1189 | --npages; | ||
1190 | |||
1191 | /* This mapping is marked non-cacheable, no flush necessary. */ | ||
1192 | sun4c_put_pte(vpage, 0); | ||
1193 | clear_bit((vpage - sun4c_iobuffer_start) >> PAGE_SHIFT, | ||
1194 | sun4c_iobuffer_map); | ||
1195 | vpage += PAGE_SIZE; | ||
1196 | } | ||
1197 | |||
1198 | /* garbage collect */ | ||
1199 | scan = (sun4c_iobuffer_high - sun4c_iobuffer_start) >> PAGE_SHIFT; | ||
1200 | while (scan >= 0 && !sun4c_iobuffer_map[scan >> 5]) | ||
1201 | scan -= 32; | ||
1202 | scan += 32; | ||
1203 | high = sun4c_iobuffer_start + (scan << PAGE_SHIFT); | ||
1204 | high = SUN4C_REAL_PGDIR_ALIGN(high) + SUN4C_REAL_PGDIR_SIZE; | ||
1205 | while (high < sun4c_iobuffer_high) { | ||
1206 | sun4c_iobuffer_high -= SUN4C_REAL_PGDIR_SIZE; | ||
1207 | free_locked_segment(sun4c_iobuffer_high); | ||
1208 | } | ||
1209 | local_irq_restore(flags); | ||
1210 | } | ||
1211 | |||
1212 | /* Note: at init time the scsi code passes us buffers which | ||
1213 | * sit on the kernel stack; those are already locked by | ||
1214 | * implication, and would fool the page locking code above | ||
1215 | * if handed to it by mistake. | ||
1216 | */ | ||
1217 | static __u32 sun4c_get_scsi_one(char *bufptr, unsigned long len, struct sbus_bus *sbus) | ||
1218 | { | ||
1219 | unsigned long page; | ||
1220 | |||
1221 | page = ((unsigned long)bufptr) & PAGE_MASK; | ||
1222 | if (!virt_addr_valid(page)) { | ||
1223 | sun4c_flush_page(page); | ||
1224 | return (__u32)bufptr; /* already locked */ | ||
1225 | } | ||
1226 | return (__u32)sun4c_lockarea(bufptr, len); | ||
1227 | } | ||
1228 | |||
1229 | static void sun4c_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus) | ||
1230 | { | ||
1231 | while (sz != 0) { | ||
1232 | --sz; | ||
1233 | sg[sz].dvma_address = (__u32)sun4c_lockarea(page_address(sg[sz].page) + sg[sz].offset, sg[sz].length); | ||
1234 | sg[sz].dvma_length = sg[sz].length; | ||
1235 | } | ||
1236 | } | ||
1237 | |||
1238 | static void sun4c_release_scsi_one(__u32 bufptr, unsigned long len, struct sbus_bus *sbus) | ||
1239 | { | ||
1240 | if (bufptr < sun4c_iobuffer_start) | ||
1241 | return; /* On kernel stack or similar, see above */ | ||
1242 | sun4c_unlockarea((char *)bufptr, len); | ||
1243 | } | ||
1244 | |||
1245 | static void sun4c_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus) | ||
1246 | { | ||
1247 | while (sz != 0) { | ||
1248 | --sz; | ||
1249 | sun4c_unlockarea((char *)sg[sz].dvma_address, sg[sz].length); | ||
1250 | } | ||
1251 | } | ||
1252 | |||
1253 | #define TASK_ENTRY_SIZE BUCKET_SIZE /* see above */ | ||
1254 | #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1)) | ||
1255 | |||
1256 | struct vm_area_struct sun4c_kstack_vma; | ||
1257 | |||
1258 | static void __init sun4c_init_lock_areas(void) | ||
1259 | { | ||
1260 | unsigned long sun4c_taskstack_start; | ||
1261 | unsigned long sun4c_taskstack_end; | ||
1262 | int bitmap_size; | ||
1263 | |||
1264 | sun4c_init_buckets(); | ||
1265 | sun4c_taskstack_start = SUN4C_LOCK_VADDR; | ||
1266 | sun4c_taskstack_end = (sun4c_taskstack_start + | ||
1267 | (TASK_ENTRY_SIZE * NR_TASK_BUCKETS)); | ||
1268 | if (sun4c_taskstack_end >= SUN4C_LOCK_END) { | ||
1269 | prom_printf("Too many tasks, decrease NR_TASK_BUCKETS please.\n"); | ||
1270 | prom_halt(); | ||
1271 | } | ||
1272 | |||
1273 | sun4c_iobuffer_start = sun4c_iobuffer_high = | ||
1274 | SUN4C_REAL_PGDIR_ALIGN(sun4c_taskstack_end); | ||
1275 | sun4c_iobuffer_end = SUN4C_LOCK_END; | ||
1276 | bitmap_size = (sun4c_iobuffer_end - sun4c_iobuffer_start) >> PAGE_SHIFT; | ||
1277 | bitmap_size = (bitmap_size + 7) >> 3; | ||
1278 | bitmap_size = LONG_ALIGN(bitmap_size); | ||
1279 | iobuffer_map_size = bitmap_size << 3; | ||
1280 | sun4c_iobuffer_map = __alloc_bootmem(bitmap_size, SMP_CACHE_BYTES, 0UL); | ||
1281 | memset((void *) sun4c_iobuffer_map, 0, bitmap_size); | ||
1282 | |||
1283 | sun4c_kstack_vma.vm_mm = &init_mm; | ||
1284 | sun4c_kstack_vma.vm_start = sun4c_taskstack_start; | ||
1285 | sun4c_kstack_vma.vm_end = sun4c_taskstack_end; | ||
1286 | sun4c_kstack_vma.vm_page_prot = PAGE_SHARED; | ||
1287 | sun4c_kstack_vma.vm_flags = VM_READ | VM_WRITE | VM_EXEC; | ||
1288 | insert_vm_struct(&init_mm, &sun4c_kstack_vma); | ||
1289 | } | ||
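/* On the bitmap sizing above: bitmap_size starts out as a page count
 * (one bit per iobuffer page), "(bits + 7) >> 3" rounds up to whole
 * bytes, LONG_ALIGN() pads to a multiple of sizeof(long) for the
 * word-wise scans in sun4c_unlockarea(), and iobuffer_map_size then
 * converts back to usable bits ("<< 3" being "* 8 bits per byte").
 */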
1290 | |||
1291 | /* Cache flushing on the sun4c. */ | ||
1292 | static void sun4c_flush_cache_all(void) | ||
1293 | { | ||
1294 | unsigned long begin, end; | ||
1295 | |||
1296 | flush_user_windows(); | ||
1297 | begin = (KERNBASE + SUN4C_REAL_PGDIR_SIZE); | ||
1298 | end = (begin + SUN4C_VAC_SIZE); | ||
1299 | |||
1300 | if (sun4c_vacinfo.linesize == 32) { | ||
1301 | while (begin < end) { | ||
1302 | __asm__ __volatile__( | ||
1303 | "ld [%0 + 0x00], %%g0\n\t" | ||
1304 | "ld [%0 + 0x20], %%g0\n\t" | ||
1305 | "ld [%0 + 0x40], %%g0\n\t" | ||
1306 | "ld [%0 + 0x60], %%g0\n\t" | ||
1307 | "ld [%0 + 0x80], %%g0\n\t" | ||
1308 | "ld [%0 + 0xa0], %%g0\n\t" | ||
1309 | "ld [%0 + 0xc0], %%g0\n\t" | ||
1310 | "ld [%0 + 0xe0], %%g0\n\t" | ||
1311 | "ld [%0 + 0x100], %%g0\n\t" | ||
1312 | "ld [%0 + 0x120], %%g0\n\t" | ||
1313 | "ld [%0 + 0x140], %%g0\n\t" | ||
1314 | "ld [%0 + 0x160], %%g0\n\t" | ||
1315 | "ld [%0 + 0x180], %%g0\n\t" | ||
1316 | "ld [%0 + 0x1a0], %%g0\n\t" | ||
1317 | "ld [%0 + 0x1c0], %%g0\n\t" | ||
1318 | "ld [%0 + 0x1e0], %%g0\n" | ||
1319 | : : "r" (begin)); | ||
1320 | begin += 512; | ||
1321 | } | ||
1322 | } else { | ||
1323 | while (begin < end) { | ||
1324 | __asm__ __volatile__( | ||
1325 | "ld [%0 + 0x00], %%g0\n\t" | ||
1326 | "ld [%0 + 0x10], %%g0\n\t" | ||
1327 | "ld [%0 + 0x20], %%g0\n\t" | ||
1328 | "ld [%0 + 0x30], %%g0\n\t" | ||
1329 | "ld [%0 + 0x40], %%g0\n\t" | ||
1330 | "ld [%0 + 0x50], %%g0\n\t" | ||
1331 | "ld [%0 + 0x60], %%g0\n\t" | ||
1332 | "ld [%0 + 0x70], %%g0\n\t" | ||
1333 | "ld [%0 + 0x80], %%g0\n\t" | ||
1334 | "ld [%0 + 0x90], %%g0\n\t" | ||
1335 | "ld [%0 + 0xa0], %%g0\n\t" | ||
1336 | "ld [%0 + 0xb0], %%g0\n\t" | ||
1337 | "ld [%0 + 0xc0], %%g0\n\t" | ||
1338 | "ld [%0 + 0xd0], %%g0\n\t" | ||
1339 | "ld [%0 + 0xe0], %%g0\n\t" | ||
1340 | "ld [%0 + 0xf0], %%g0\n" | ||
1341 | : : "r" (begin)); | ||
1342 | begin += 256; | ||
1343 | } | ||
1344 | } | ||
1345 | } | ||
1346 | |||
1347 | static void sun4c_flush_cache_mm(struct mm_struct *mm) | ||
1348 | { | ||
1349 | int new_ctx = mm->context; | ||
1350 | |||
1351 | if (new_ctx != NO_CONTEXT) { | ||
1352 | flush_user_windows(); | ||
1353 | |||
1354 | if (sun4c_context_ring[new_ctx].num_entries) { | ||
1355 | struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd; | ||
1356 | unsigned long flags; | ||
1357 | |||
1358 | local_irq_save(flags); | ||
1359 | if (head->next != head) { | ||
1360 | struct sun4c_mmu_entry *entry = head->next; | ||
1361 | int savectx = sun4c_get_context(); | ||
1362 | |||
1363 | sun4c_set_context(new_ctx); | ||
1364 | sun4c_flush_context(); | ||
1365 | do { | ||
1366 | struct sun4c_mmu_entry *next = entry->next; | ||
1367 | |||
1368 | sun4c_user_unmap(entry); | ||
1369 | free_user_entry(new_ctx, entry); | ||
1370 | |||
1371 | entry = next; | ||
1372 | } while (entry != head); | ||
1373 | sun4c_set_context(savectx); | ||
1374 | } | ||
1375 | local_irq_restore(flags); | ||
1376 | } | ||
1377 | } | ||
1378 | } | ||
1379 | |||
1380 | static void sun4c_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) | ||
1381 | { | ||
1382 | struct mm_struct *mm = vma->vm_mm; | ||
1383 | int new_ctx = mm->context; | ||
1384 | |||
1385 | if (new_ctx != NO_CONTEXT) { | ||
1386 | struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd; | ||
1387 | struct sun4c_mmu_entry *entry; | ||
1388 | unsigned long flags; | ||
1389 | |||
1390 | flush_user_windows(); | ||
1391 | |||
1392 | local_irq_save(flags); | ||
1393 | /* All user segmap chains are ordered on entry->vaddr. */ | ||
1394 | for (entry = head->next; | ||
1395 | (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start); | ||
1396 | entry = entry->next) | ||
1397 | ; | ||
1398 | |||
1399 | /* Tracing various job mixtures showed that this conditional | ||
1400 | * only passes ~35% of the time in most worst-case situations, | ||
1401 | * therefore we avoid all of this gross overhead ~65% of the time. | ||
1402 | */ | ||
1403 | if ((entry != head) && (entry->vaddr < end)) { | ||
1404 | int octx = sun4c_get_context(); | ||
1405 | sun4c_set_context(new_ctx); | ||
1406 | |||
1407 | /* At this point, always, (start >= entry->vaddr) and | ||
1408 | * (entry->vaddr < end), once the latter condition | ||
1409 | * ceases to hold, or we hit the end of the list, we | ||
1410 | * exit the loop. The ordering of all user allocated | ||
1411 | * segmaps makes this all work out so beautifully. | ||
1412 | */ | ||
1413 | do { | ||
1414 | struct sun4c_mmu_entry *next = entry->next; | ||
1415 | unsigned long realend; | ||
1416 | |||
1417 | /* "realstart" is always >= entry->vaddr */ | ||
1418 | realend = entry->vaddr + SUN4C_REAL_PGDIR_SIZE; | ||
1419 | if (end < realend) | ||
1420 | realend = end; | ||
1421 | if ((realend - entry->vaddr) <= (PAGE_SIZE << 3)) { | ||
1422 | unsigned long page = entry->vaddr; | ||
1423 | while (page < realend) { | ||
1424 | sun4c_flush_page(page); | ||
1425 | page += PAGE_SIZE; | ||
1426 | } | ||
1427 | } else { | ||
1428 | sun4c_flush_segment(entry->vaddr); | ||
1429 | sun4c_user_unmap(entry); | ||
1430 | free_user_entry(new_ctx, entry); | ||
1431 | } | ||
1432 | entry = next; | ||
1433 | } while ((entry != head) && (entry->vaddr < end)); | ||
1434 | sun4c_set_context(octx); | ||
1435 | } | ||
1436 | local_irq_restore(flags); | ||
1437 | } | ||
1438 | } | ||
1439 | |||
1440 | static void sun4c_flush_cache_page(struct vm_area_struct *vma, unsigned long page) | ||
1441 | { | ||
1442 | struct mm_struct *mm = vma->vm_mm; | ||
1443 | int new_ctx = mm->context; | ||
1444 | |||
1445 | /* Sun4c has no separate I/D caches, so we cannot optimize | ||
1446 | * flushes of non-text pages. | ||
1447 | */ | ||
1448 | if (new_ctx != NO_CONTEXT) { | ||
1449 | int octx = sun4c_get_context(); | ||
1450 | unsigned long flags; | ||
1451 | |||
1452 | flush_user_windows(); | ||
1453 | local_irq_save(flags); | ||
1454 | sun4c_set_context(new_ctx); | ||
1455 | sun4c_flush_page(page); | ||
1456 | sun4c_set_context(octx); | ||
1457 | local_irq_restore(flags); | ||
1458 | } | ||
1459 | } | ||
1460 | |||
1461 | static void sun4c_flush_page_to_ram(unsigned long page) | ||
1462 | { | ||
1463 | unsigned long flags; | ||
1464 | |||
1465 | local_irq_save(flags); | ||
1466 | sun4c_flush_page(page); | ||
1467 | local_irq_restore(flags); | ||
1468 | } | ||
1469 | |||
1470 | /* Sun4c cache is unified, both instructions and data live there, so | ||
1471 | * no need to flush the on-stack instructions for new signal handlers. | ||
1472 | */ | ||
1473 | static void sun4c_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr) | ||
1474 | { | ||
1475 | } | ||
1476 | |||
1477 | /* TLB flushing on the sun4c. These routines count on the cache | ||
1478 | * flushing code to flush the user register windows so that we need | ||
1479 | * not do so when we get here. | ||
1480 | */ | ||
1481 | |||
1482 | static void sun4c_flush_tlb_all(void) | ||
1483 | { | ||
1484 | struct sun4c_mmu_entry *this_entry, *next_entry; | ||
1485 | unsigned long flags; | ||
1486 | int savectx, ctx; | ||
1487 | |||
1488 | local_irq_save(flags); | ||
1489 | this_entry = sun4c_kernel_ring.ringhd.next; | ||
1490 | savectx = sun4c_get_context(); | ||
1491 | flush_user_windows(); | ||
1492 | while (sun4c_kernel_ring.num_entries) { | ||
1493 | next_entry = this_entry->next; | ||
1494 | sun4c_flush_segment(this_entry->vaddr); | ||
1495 | for (ctx = 0; ctx < num_contexts; ctx++) { | ||
1496 | sun4c_set_context(ctx); | ||
1497 | sun4c_put_segmap(this_entry->vaddr, invalid_segment); | ||
1498 | } | ||
1499 | free_kernel_entry(this_entry, &sun4c_kernel_ring); | ||
1500 | this_entry = next_entry; | ||
1501 | } | ||
1502 | sun4c_set_context(savectx); | ||
1503 | local_irq_restore(flags); | ||
1504 | } | ||
1505 | |||
1506 | static void sun4c_flush_tlb_mm(struct mm_struct *mm) | ||
1507 | { | ||
1508 | int new_ctx = mm->context; | ||
1509 | |||
1510 | if (new_ctx != NO_CONTEXT) { | ||
1511 | struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd; | ||
1512 | unsigned long flags; | ||
1513 | |||
1514 | local_irq_save(flags); | ||
1515 | if (head->next != head) { | ||
1516 | struct sun4c_mmu_entry *entry = head->next; | ||
1517 | int savectx = sun4c_get_context(); | ||
1518 | |||
1519 | sun4c_set_context(new_ctx); | ||
1520 | sun4c_flush_context(); | ||
1521 | do { | ||
1522 | struct sun4c_mmu_entry *next = entry->next; | ||
1523 | |||
1524 | sun4c_user_unmap(entry); | ||
1525 | free_user_entry(new_ctx, entry); | ||
1526 | |||
1527 | entry = next; | ||
1528 | } while (entry != head); | ||
1529 | sun4c_set_context(savectx); | ||
1530 | } | ||
1531 | local_irq_restore(flags); | ||
1532 | } | ||
1533 | } | ||
1534 | |||
1535 | static void sun4c_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) | ||
1536 | { | ||
1537 | struct mm_struct *mm = vma->vm_mm; | ||
1538 | int new_ctx = mm->context; | ||
1539 | |||
1540 | if (new_ctx != NO_CONTEXT) { | ||
1541 | struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd; | ||
1542 | struct sun4c_mmu_entry *entry; | ||
1543 | unsigned long flags; | ||
1544 | |||
1545 | local_irq_save(flags); | ||
1546 | /* See commentary in sun4c_flush_cache_range(). */ | ||
1547 | for (entry = head->next; | ||
1548 | (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start); | ||
1549 | entry = entry->next) | ||
1550 | ; | ||
1551 | |||
1552 | if ((entry != head) && (entry->vaddr < end)) { | ||
1553 | int octx = sun4c_get_context(); | ||
1554 | |||
1555 | sun4c_set_context(new_ctx); | ||
1556 | do { | ||
1557 | struct sun4c_mmu_entry *next = entry->next; | ||
1558 | |||
1559 | sun4c_flush_segment(entry->vaddr); | ||
1560 | sun4c_user_unmap(entry); | ||
1561 | free_user_entry(new_ctx, entry); | ||
1562 | |||
1563 | entry = next; | ||
1564 | } while ((entry != head) && (entry->vaddr < end)); | ||
1565 | sun4c_set_context(octx); | ||
1566 | } | ||
1567 | local_irq_restore(flags); | ||
1568 | } | ||
1569 | } | ||
1570 | |||
1571 | static void sun4c_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) | ||
1572 | { | ||
1573 | struct mm_struct *mm = vma->vm_mm; | ||
1574 | int new_ctx = mm->context; | ||
1575 | |||
1576 | if (new_ctx != NO_CONTEXT) { | ||
1577 | int savectx = sun4c_get_context(); | ||
1578 | unsigned long flags; | ||
1579 | |||
1580 | local_irq_save(flags); | ||
1581 | sun4c_set_context(new_ctx); | ||
1582 | page &= PAGE_MASK; | ||
1583 | sun4c_flush_page(page); | ||
1584 | sun4c_put_pte(page, 0); | ||
1585 | sun4c_set_context(savectx); | ||
1586 | local_irq_restore(flags); | ||
1587 | } | ||
1588 | } | ||
1589 | |||
1590 | static inline void sun4c_mapioaddr(unsigned long physaddr, unsigned long virt_addr) | ||
1591 | { | ||
1592 | unsigned long page_entry; | ||
1593 | |||
1594 | page_entry = ((physaddr >> PAGE_SHIFT) & SUN4C_PFN_MASK); | ||
1595 | page_entry |= ((pg_iobits | _SUN4C_PAGE_PRIV) & ~(_SUN4C_PAGE_PRESENT)); | ||
1596 | sun4c_put_pte(virt_addr, page_entry); | ||
1597 | } | ||
1598 | |||
1599 | static void sun4c_mapiorange(unsigned int bus, unsigned long xpa, | ||
1600 | unsigned long xva, unsigned int len) | ||
1601 | { | ||
1602 | while (len != 0) { | ||
1603 | len -= PAGE_SIZE; | ||
1604 | sun4c_mapioaddr(xpa, xva); | ||
1605 | xva += PAGE_SIZE; | ||
1606 | xpa += PAGE_SIZE; | ||
1607 | } | ||
1608 | } | ||
1609 | |||
1610 | static void sun4c_unmapiorange(unsigned long virt_addr, unsigned int len) | ||
1611 | { | ||
1612 | while (len != 0) { | ||
1613 | len -= PAGE_SIZE; | ||
1614 | sun4c_put_pte(virt_addr, 0); | ||
1615 | virt_addr += PAGE_SIZE; | ||
1616 | } | ||
1617 | } | ||
1618 | |||
1619 | static void sun4c_alloc_context(struct mm_struct *old_mm, struct mm_struct *mm) | ||
1620 | { | ||
1621 | struct ctx_list *ctxp; | ||
1622 | |||
1623 | ctxp = ctx_free.next; | ||
1624 | if (ctxp != &ctx_free) { | ||
1625 | remove_from_ctx_list(ctxp); | ||
1626 | add_to_used_ctxlist(ctxp); | ||
1627 | mm->context = ctxp->ctx_number; | ||
1628 | ctxp->ctx_mm = mm; | ||
1629 | return; | ||
1630 | } | ||
1631 | ctxp = ctx_used.next; | ||
1632 | if (ctxp->ctx_mm == old_mm) | ||
1633 | ctxp = ctxp->next; | ||
1634 | remove_from_ctx_list(ctxp); | ||
1635 | add_to_used_ctxlist(ctxp); | ||
1636 | ctxp->ctx_mm->context = NO_CONTEXT; | ||
1637 | ctxp->ctx_mm = mm; | ||
1638 | mm->context = ctxp->ctx_number; | ||
1639 | sun4c_demap_context(&sun4c_context_ring[ctxp->ctx_number], | ||
1640 | ctxp->ctx_number); | ||
1641 | } | ||
1642 | |||
1643 | /* Switch the current MM context. */ | ||
1644 | static void sun4c_switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk, int cpu) | ||
1645 | { | ||
1646 | struct ctx_list *ctx; | ||
1647 | int dirty = 0; | ||
1648 | |||
1649 | if (mm->context == NO_CONTEXT) { | ||
1650 | dirty = 1; | ||
1651 | sun4c_alloc_context(old_mm, mm); | ||
1652 | } else { | ||
1653 | /* Update the LRU ring of contexts. */ | ||
1654 | ctx = ctx_list_pool + mm->context; | ||
1655 | remove_from_ctx_list(ctx); | ||
1656 | add_to_used_ctxlist(ctx); | ||
1657 | } | ||
1658 | if (dirty || old_mm != mm) | ||
1659 | sun4c_set_context(mm->context); | ||
1660 | } | ||
1661 | |||
1662 | static void sun4c_destroy_context(struct mm_struct *mm) | ||
1663 | { | ||
1664 | struct ctx_list *ctx_old; | ||
1665 | |||
1666 | if (mm->context != NO_CONTEXT) { | ||
1667 | sun4c_demap_context(&sun4c_context_ring[mm->context], mm->context); | ||
1668 | ctx_old = ctx_list_pool + mm->context; | ||
1669 | remove_from_ctx_list(ctx_old); | ||
1670 | add_to_free_ctxlist(ctx_old); | ||
1671 | mm->context = NO_CONTEXT; | ||
1672 | } | ||
1673 | } | ||
1674 | |||
1675 | static void sun4c_mmu_info(struct seq_file *m) | ||
1676 | { | ||
1677 | int used_user_entries, i; | ||
1678 | |||
1679 | used_user_entries = 0; | ||
1680 | for (i = 0; i < num_contexts; i++) | ||
1681 | used_user_entries += sun4c_context_ring[i].num_entries; | ||
1682 | |||
1683 | seq_printf(m, | ||
1684 | "vacsize\t\t: %d bytes\n" | ||
1685 | "vachwflush\t: %s\n" | ||
1686 | "vaclinesize\t: %d bytes\n" | ||
1687 | "mmuctxs\t\t: %d\n" | ||
1688 | "mmupsegs\t: %d\n" | ||
1689 | "kernelpsegs\t: %d\n" | ||
1690 | "kfreepsegs\t: %d\n" | ||
1691 | "usedpsegs\t: %d\n" | ||
1692 | "ufreepsegs\t: %d\n" | ||
1693 | "user_taken\t: %d\n" | ||
1694 | "max_taken\t: %d\n", | ||
1695 | sun4c_vacinfo.num_bytes, | ||
1696 | (sun4c_vacinfo.do_hwflushes ? "yes" : "no"), | ||
1697 | sun4c_vacinfo.linesize, | ||
1698 | num_contexts, | ||
1699 | (invalid_segment + 1), | ||
1700 | sun4c_kernel_ring.num_entries, | ||
1701 | sun4c_kfree_ring.num_entries, | ||
1702 | used_user_entries, | ||
1703 | sun4c_ufree_ring.num_entries, | ||
1704 | sun4c_user_taken_entries, | ||
1705 | max_user_taken_entries); | ||
1706 | } | ||
1707 | |||
1708 | /* Nothing below here should touch the mmu hardware nor the mmu_entry | ||
1709 | * data structures. | ||
1710 | */ | ||
1711 | |||
1712 | /* First the functions which the mid-level code uses to directly | ||
1713 | * manipulate the software page tables. Some defines since we are | ||
1714 | * emulating the i386 page directory layout. | ||
1715 | */ | ||
1716 | #define PGD_PRESENT 0x001 | ||
1717 | #define PGD_RW 0x002 | ||
1718 | #define PGD_USER 0x004 | ||
1719 | #define PGD_ACCESSED 0x020 | ||
1720 | #define PGD_DIRTY 0x040 | ||
1721 | #define PGD_TABLE (PGD_PRESENT | PGD_RW | PGD_USER | PGD_ACCESSED | PGD_DIRTY) | ||
1722 | |||
1723 | static void sun4c_set_pte(pte_t *ptep, pte_t pte) | ||
1724 | { | ||
1725 | *ptep = pte; | ||
1726 | } | ||
1727 | |||
1728 | static void sun4c_pgd_set(pgd_t * pgdp, pmd_t * pmdp) | ||
1729 | { | ||
1730 | } | ||
1731 | |||
1732 | static void sun4c_pmd_set(pmd_t * pmdp, pte_t * ptep) | ||
1733 | { | ||
1734 | pmdp->pmdv[0] = PGD_TABLE | (unsigned long) ptep; | ||
1735 | } | ||
1736 | |||
1737 | static void sun4c_pmd_populate(pmd_t * pmdp, struct page * ptep) | ||
1738 | { | ||
1739 | if (page_address(ptep) == NULL) BUG(); /* No highmem on sun4c */ | ||
1740 | pmdp->pmdv[0] = PGD_TABLE | (unsigned long) page_address(ptep); | ||
1741 | } | ||
1742 | |||
1743 | static int sun4c_pte_present(pte_t pte) | ||
1744 | { | ||
1745 | return ((pte_val(pte) & (_SUN4C_PAGE_PRESENT | _SUN4C_PAGE_PRIV)) != 0); | ||
1746 | } | ||
1747 | static void sun4c_pte_clear(pte_t *ptep) { *ptep = __pte(0); } | ||
1748 | |||
1749 | static int sun4c_pte_read(pte_t pte) | ||
1750 | { | ||
1751 | return (pte_val(pte) & _SUN4C_PAGE_READ); | ||
1752 | } | ||
1753 | |||
1754 | static int sun4c_pmd_bad(pmd_t pmd) | ||
1755 | { | ||
1756 | return (((pmd_val(pmd) & ~PAGE_MASK) != PGD_TABLE) || | ||
1757 | (!virt_addr_valid(pmd_val(pmd)))); | ||
1758 | } | ||
1759 | |||
1760 | static int sun4c_pmd_present(pmd_t pmd) | ||
1761 | { | ||
1762 | return ((pmd_val(pmd) & PGD_PRESENT) != 0); | ||
1763 | } | ||
1764 | |||
1765 | #if 0 /* if PMD takes one word */ | ||
1766 | static void sun4c_pmd_clear(pmd_t *pmdp) { *pmdp = __pmd(0); } | ||
1767 | #else /* if pmd_t is a longish aggregate */ | ||
1768 | static void sun4c_pmd_clear(pmd_t *pmdp) { | ||
1769 | memset((void *)pmdp, 0, sizeof(pmd_t)); | ||
1770 | } | ||
1771 | #endif | ||
1772 | |||
1773 | static int sun4c_pgd_none(pgd_t pgd) { return 0; } | ||
1774 | static int sun4c_pgd_bad(pgd_t pgd) { return 0; } | ||
1775 | static int sun4c_pgd_present(pgd_t pgd) { return 1; } | ||
1776 | static void sun4c_pgd_clear(pgd_t * pgdp) { } | ||
1777 | |||
1778 | /* | ||
1779 | * The following only work if pte_present() is true. | ||
1780 | * Undefined behaviour if not.. | ||
1781 | */ | ||
1782 | static pte_t sun4c_pte_mkwrite(pte_t pte) | ||
1783 | { | ||
1784 | pte = __pte(pte_val(pte) | _SUN4C_PAGE_WRITE); | ||
1785 | if (pte_val(pte) & _SUN4C_PAGE_MODIFIED) | ||
1786 | pte = __pte(pte_val(pte) | _SUN4C_PAGE_SILENT_WRITE); | ||
1787 | return pte; | ||
1788 | } | ||
1789 | |||
1790 | static pte_t sun4c_pte_mkdirty(pte_t pte) | ||
1791 | { | ||
1792 | pte = __pte(pte_val(pte) | _SUN4C_PAGE_MODIFIED); | ||
1793 | if (pte_val(pte) & _SUN4C_PAGE_WRITE) | ||
1794 | pte = __pte(pte_val(pte) | _SUN4C_PAGE_SILENT_WRITE); | ||
1795 | return pte; | ||
1796 | } | ||
1797 | |||
1798 | static pte_t sun4c_pte_mkyoung(pte_t pte) | ||
1799 | { | ||
1800 | pte = __pte(pte_val(pte) | _SUN4C_PAGE_ACCESSED); | ||
1801 | if (pte_val(pte) & _SUN4C_PAGE_READ) | ||
1802 | pte = __pte(pte_val(pte) | _SUN4C_PAGE_SILENT_READ); | ||
1803 | return pte; | ||
1804 | } | ||
1805 | |||
1806 | /* | ||
1807 | * Conversion functions: convert a page and protection to a page entry, | ||
1808 | * and a page entry and page directory to the page they refer to. | ||
1809 | */ | ||
1810 | static pte_t sun4c_mk_pte(struct page *page, pgprot_t pgprot) | ||
1811 | { | ||
1812 | return __pte(page_to_pfn(page) | pgprot_val(pgprot)); | ||
1813 | } | ||
1814 | |||
1815 | static pte_t sun4c_mk_pte_phys(unsigned long phys_page, pgprot_t pgprot) | ||
1816 | { | ||
1817 | return __pte((phys_page >> PAGE_SHIFT) | pgprot_val(pgprot)); | ||
1818 | } | ||
1819 | |||
1820 | static pte_t sun4c_mk_pte_io(unsigned long page, pgprot_t pgprot, int space) | ||
1821 | { | ||
1822 | return __pte(((page - PAGE_OFFSET) >> PAGE_SHIFT) | pgprot_val(pgprot)); | ||
1823 | } | ||
1824 | |||
1825 | static unsigned long sun4c_pte_pfn(pte_t pte) | ||
1826 | { | ||
1827 | return pte_val(pte) & SUN4C_PFN_MASK; | ||
1828 | } | ||
1829 | |||
1830 | static pte_t sun4c_pgoff_to_pte(unsigned long pgoff) | ||
1831 | { | ||
1832 | return __pte(pgoff | _SUN4C_PAGE_FILE); | ||
1833 | } | ||
1834 | |||
1835 | static unsigned long sun4c_pte_to_pgoff(pte_t pte) | ||
1836 | { | ||
1837 | return pte_val(pte) & ((1UL << PTE_FILE_MAX_BITS) - 1); | ||
1838 | } | ||
1839 | |||
1840 | |||
1841 | static __inline__ unsigned long sun4c_pmd_page_v(pmd_t pmd) | ||
1842 | { | ||
1843 | return (pmd_val(pmd) & PAGE_MASK); | ||
1844 | } | ||
1845 | |||
1846 | static struct page *sun4c_pmd_page(pmd_t pmd) | ||
1847 | { | ||
1848 | return virt_to_page(sun4c_pmd_page_v(pmd)); | ||
1849 | } | ||
1850 | |||
1851 | static unsigned long sun4c_pgd_page(pgd_t pgd) { return 0; } | ||
1852 | |||
1853 | /* to find an entry in a page-table-directory */ | ||
1854 | static inline pgd_t *sun4c_pgd_offset(struct mm_struct * mm, unsigned long address) | ||
1855 | { | ||
1856 | return mm->pgd + (address >> SUN4C_PGDIR_SHIFT); | ||
1857 | } | ||
1858 | |||
1859 | /* Find an entry in the second-level page table.. */ | ||
1860 | static pmd_t *sun4c_pmd_offset(pgd_t * dir, unsigned long address) | ||
1861 | { | ||
1862 | return (pmd_t *) dir; | ||
1863 | } | ||
1864 | |||
1865 | /* Find an entry in the third-level page table.. */ | ||
1866 | pte_t *sun4c_pte_offset_kernel(pmd_t * dir, unsigned long address) | ||
1867 | { | ||
1868 | return (pte_t *) sun4c_pmd_page_v(*dir) + | ||
1869 | ((address >> PAGE_SHIFT) & (SUN4C_PTRS_PER_PTE - 1)); | ||
1870 | } | ||
1871 | |||
1872 | static unsigned long sun4c_swp_type(swp_entry_t entry) | ||
1873 | { | ||
1874 | return (entry.val & SUN4C_SWP_TYPE_MASK); | ||
1875 | } | ||
1876 | |||
1877 | static unsigned long sun4c_swp_offset(swp_entry_t entry) | ||
1878 | { | ||
1879 | return (entry.val >> SUN4C_SWP_OFF_SHIFT) & SUN4C_SWP_OFF_MASK; | ||
1880 | } | ||
1881 | |||
1882 | static swp_entry_t sun4c_swp_entry(unsigned long type, unsigned long offset) | ||
1883 | { | ||
1884 | return (swp_entry_t) { | ||
1885 | (offset & SUN4C_SWP_OFF_MASK) << SUN4C_SWP_OFF_SHIFT | ||
1886 | | (type & SUN4C_SWP_TYPE_MASK) }; | ||
1887 | } | ||
1888 | |||
1889 | static void sun4c_free_pte_slow(pte_t *pte) | ||
1890 | { | ||
1891 | free_page((unsigned long)pte); | ||
1892 | } | ||
1893 | |||
1894 | static void sun4c_free_pgd_slow(pgd_t *pgd) | ||
1895 | { | ||
1896 | free_page((unsigned long)pgd); | ||
1897 | } | ||
1898 | |||
1899 | static pgd_t *sun4c_get_pgd_fast(void) | ||
1900 | { | ||
1901 | unsigned long *ret; | ||
1902 | |||
1903 | if ((ret = pgd_quicklist) != NULL) { | ||
1904 | pgd_quicklist = (unsigned long *)(*ret); | ||
1905 | ret[0] = ret[1]; | ||
1906 | pgtable_cache_size--; | ||
1907 | } else { | ||
1908 | pgd_t *init; | ||
1909 | |||
1910 | ret = (unsigned long *)__get_free_page(GFP_KERNEL); | ||
1911 | memset (ret, 0, (KERNBASE / SUN4C_PGDIR_SIZE) * sizeof(pgd_t)); | ||
1912 | init = sun4c_pgd_offset(&init_mm, 0); | ||
1913 | memcpy (((pgd_t *)ret) + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD, | ||
1914 | (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); | ||
1915 | } | ||
1916 | return (pgd_t *)ret; | ||
1917 | } | ||
1918 | |||
1919 | static void sun4c_free_pgd_fast(pgd_t *pgd) | ||
1920 | { | ||
1921 | *(unsigned long *)pgd = (unsigned long) pgd_quicklist; | ||
1922 | pgd_quicklist = (unsigned long *) pgd; | ||
1923 | pgtable_cache_size++; | ||
1924 | } | ||
1925 | |||
1926 | |||
1927 | static __inline__ pte_t * | ||
1928 | sun4c_pte_alloc_one_fast(struct mm_struct *mm, unsigned long address) | ||
1929 | { | ||
1930 | unsigned long *ret; | ||
1931 | |||
1932 | if ((ret = (unsigned long *)pte_quicklist) != NULL) { | ||
1933 | pte_quicklist = (unsigned long *)(*ret); | ||
1934 | ret[0] = ret[1]; | ||
1935 | pgtable_cache_size--; | ||
1936 | } | ||
1937 | return (pte_t *)ret; | ||
1938 | } | ||
1939 | |||
1940 | static pte_t *sun4c_pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) | ||
1941 | { | ||
1942 | pte_t *pte; | ||
1943 | |||
1944 | if ((pte = sun4c_pte_alloc_one_fast(mm, address)) != NULL) | ||
1945 | return pte; | ||
1946 | |||
1947 | pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT); | ||
1948 | if (pte) | ||
1949 | memset(pte, 0, PAGE_SIZE); | ||
1950 | return pte; | ||
1951 | } | ||
1952 | |||
1953 | static struct page *sun4c_pte_alloc_one(struct mm_struct *mm, unsigned long address) | ||
1954 | { | ||
1955 | pte_t *pte = sun4c_pte_alloc_one_kernel(mm, address); | ||
1956 | if (pte == NULL) | ||
1957 | return NULL; | ||
1958 | return virt_to_page(pte); | ||
1959 | } | ||
1960 | |||
1961 | static __inline__ void sun4c_free_pte_fast(pte_t *pte) | ||
1962 | { | ||
1963 | *(unsigned long *)pte = (unsigned long) pte_quicklist; | ||
1964 | pte_quicklist = (unsigned long *) pte; | ||
1965 | pgtable_cache_size++; | ||
1966 | } | ||
1967 | |||
1968 | static void sun4c_pte_free(struct page *pte) | ||
1969 | { | ||
1970 | sun4c_free_pte_fast(page_address(pte)); | ||
1971 | } | ||
1972 | |||
1973 | /* | ||
1974 | * allocating and freeing a pmd is trivial: the 1-entry pmd is | ||
1975 | * inside the pgd, so has no extra memory associated with it. | ||
1976 | */ | ||
1977 | static pmd_t *sun4c_pmd_alloc_one(struct mm_struct *mm, unsigned long address) | ||
1978 | { | ||
1979 | BUG(); | ||
1980 | return NULL; | ||
1981 | } | ||
1982 | |||
1983 | static void sun4c_free_pmd_fast(pmd_t * pmd) { } | ||
1984 | |||
1985 | static void sun4c_check_pgt_cache(int low, int high) | ||
1986 | { | ||
1987 | if (pgtable_cache_size > high) { | ||
1988 | do { | ||
1989 | if (pgd_quicklist) | ||
1990 | sun4c_free_pgd_slow(sun4c_get_pgd_fast()); | ||
1991 | if (pte_quicklist) | ||
1992 | sun4c_free_pte_slow(sun4c_pte_alloc_one_fast(NULL, 0)); | ||
1993 | } while (pgtable_cache_size > low); | ||
1994 | } | ||
1995 | } | ||
1996 | |||
1997 | /* An experiment; note that it is currently enabled... -DaveM */ | ||
1998 | #define SUN4C_PRELOAD_PSEG | ||
1999 | |||
2000 | void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) | ||
2001 | { | ||
2002 | unsigned long flags; | ||
2003 | int pseg; | ||
2004 | |||
2005 | local_irq_save(flags); | ||
2006 | address &= PAGE_MASK; | ||
2007 | if ((pseg = sun4c_get_segmap(address)) == invalid_segment) { | ||
2008 | struct sun4c_mmu_entry *entry = sun4c_user_strategy(); | ||
2009 | struct mm_struct *mm = vma->vm_mm; | ||
2010 | unsigned long start, end; | ||
2011 | |||
2012 | entry->vaddr = start = (address & SUN4C_REAL_PGDIR_MASK); | ||
2013 | entry->ctx = mm->context; | ||
2014 | add_ring_ordered(sun4c_context_ring + mm->context, entry); | ||
2015 | sun4c_put_segmap(entry->vaddr, entry->pseg); | ||
2016 | end = start + SUN4C_REAL_PGDIR_SIZE; | ||
2017 | while (start < end) { | ||
2018 | #ifdef SUN4C_PRELOAD_PSEG | ||
2019 | pgd_t *pgdp = sun4c_pgd_offset(mm, start); | ||
2020 | pte_t *ptep; | ||
2021 | |||
2022 | if (!pgdp) | ||
2023 | goto no_mapping; | ||
2024 | ptep = sun4c_pte_offset_kernel((pmd_t *) pgdp, start); | ||
2025 | if (!ptep || !(pte_val(*ptep) & _SUN4C_PAGE_PRESENT)) | ||
2026 | goto no_mapping; | ||
2027 | sun4c_put_pte(start, pte_val(*ptep)); | ||
2028 | goto next; | ||
2029 | |||
2030 | no_mapping: | ||
2031 | #endif | ||
2032 | sun4c_put_pte(start, 0); | ||
2033 | #ifdef SUN4C_PRELOAD_PSEG | ||
2034 | next: | ||
2035 | #endif | ||
2036 | start += PAGE_SIZE; | ||
2037 | } | ||
2038 | #ifndef SUN4C_PRELOAD_PSEG | ||
2039 | sun4c_put_pte(address, pte_val(pte)); | ||
2040 | #endif | ||
2041 | local_irq_restore(flags); | ||
2042 | return; | ||
2043 | } else { | ||
2044 | struct sun4c_mmu_entry *entry = &mmu_entry_pool[pseg]; | ||
2045 | |||
2046 | remove_lru(entry); | ||
2047 | add_lru(entry); | ||
2048 | } | ||
2049 | |||
2050 | sun4c_put_pte(address, pte_val(pte)); | ||
2051 | local_irq_restore(flags); | ||
2052 | } | ||
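/* The fault path above in brief: if the faulting address has no pseg,
 * steal one with sun4c_user_strategy(), insert it address-ordered
 * into this context's ring, and (with SUN4C_PRELOAD_PSEG) eagerly
 * load every present software PTE in the 256K segment into the
 * hardware; if a pseg was already present, the remove_lru()/add_lru()
 * pair simply moves it to the most-recently-used end before writing
 * the single PTE.
 */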
2053 | |||
2054 | extern void sparc_context_init(int); | ||
2055 | extern unsigned long end; | ||
2056 | extern unsigned long bootmem_init(unsigned long *pages_avail); | ||
2057 | extern unsigned long last_valid_pfn; | ||
2058 | |||
2059 | void __init sun4c_paging_init(void) | ||
2060 | { | ||
2061 | int i, cnt; | ||
2062 | unsigned long kernel_end, vaddr; | ||
2063 | extern struct resource sparc_iomap; | ||
2064 | unsigned long end_pfn, pages_avail; | ||
2065 | |||
2066 | kernel_end = (unsigned long) &end; | ||
2067 | kernel_end += (SUN4C_REAL_PGDIR_SIZE * 4); | ||
2068 | kernel_end = SUN4C_REAL_PGDIR_ALIGN(kernel_end); | ||
2069 | |||
2070 | pages_avail = 0; | ||
2071 | last_valid_pfn = bootmem_init(&pages_avail); | ||
2072 | end_pfn = last_valid_pfn; | ||
2073 | |||
2074 | sun4c_probe_mmu(); | ||
2075 | invalid_segment = (num_segmaps - 1); | ||
2076 | sun4c_init_mmu_entry_pool(); | ||
2077 | sun4c_init_rings(); | ||
2078 | sun4c_init_map_kernelprom(kernel_end); | ||
2079 | sun4c_init_clean_mmu(kernel_end); | ||
2080 | sun4c_init_fill_kernel_ring(SUN4C_KERNEL_BUCKETS); | ||
2081 | sun4c_init_lock_area(sparc_iomap.start, IOBASE_END); | ||
2082 | sun4c_init_lock_area(DVMA_VADDR, DVMA_END); | ||
2083 | sun4c_init_lock_areas(); | ||
2084 | sun4c_init_fill_user_ring(); | ||
2085 | |||
2086 | sun4c_set_context(0); | ||
2087 | memset(swapper_pg_dir, 0, PAGE_SIZE); | ||
2088 | memset(pg0, 0, PAGE_SIZE); | ||
2089 | memset(pg1, 0, PAGE_SIZE); | ||
2090 | memset(pg2, 0, PAGE_SIZE); | ||
2091 | memset(pg3, 0, PAGE_SIZE); | ||
2092 | |||
2093 | /* Save work later. */ | ||
2094 | vaddr = VMALLOC_START; | ||
2095 | swapper_pg_dir[vaddr>>SUN4C_PGDIR_SHIFT] = __pgd(PGD_TABLE | (unsigned long) pg0); | ||
2096 | vaddr += SUN4C_PGDIR_SIZE; | ||
2097 | swapper_pg_dir[vaddr>>SUN4C_PGDIR_SHIFT] = __pgd(PGD_TABLE | (unsigned long) pg1); | ||
2098 | vaddr += SUN4C_PGDIR_SIZE; | ||
2099 | swapper_pg_dir[vaddr>>SUN4C_PGDIR_SHIFT] = __pgd(PGD_TABLE | (unsigned long) pg2); | ||
2100 | vaddr += SUN4C_PGDIR_SIZE; | ||
2101 | swapper_pg_dir[vaddr>>SUN4C_PGDIR_SHIFT] = __pgd(PGD_TABLE | (unsigned long) pg3); | ||
2102 | sun4c_init_ss2_cache_bug(); | ||
2103 | sparc_context_init(num_contexts); | ||
2104 | |||
2105 | { | ||
2106 | unsigned long zones_size[MAX_NR_ZONES]; | ||
2107 | unsigned long zholes_size[MAX_NR_ZONES]; | ||
2108 | unsigned long npages; | ||
2109 | int znum; | ||
2110 | |||
2111 | for (znum = 0; znum < MAX_NR_ZONES; znum++) | ||
2112 | zones_size[znum] = zholes_size[znum] = 0; | ||
2113 | |||
2114 | npages = max_low_pfn - pfn_base; | ||
2115 | |||
2116 | zones_size[ZONE_DMA] = npages; | ||
2117 | zholes_size[ZONE_DMA] = npages - pages_avail; | ||
2118 | |||
2119 | npages = highend_pfn - max_low_pfn; | ||
2120 | zones_size[ZONE_HIGHMEM] = npages; | ||
2121 | zholes_size[ZONE_HIGHMEM] = npages - calc_highpages(); | ||
2122 | |||
2123 | free_area_init_node(0, &contig_page_data, zones_size, | ||
2124 | pfn_base, zholes_size); | ||
2125 | } | ||
2126 | |||
2127 | cnt = 0; | ||
2128 | for (i = 0; i < num_segmaps; i++) | ||
2129 | if (mmu_entry_pool[i].locked) | ||
2130 | cnt++; | ||
2131 | |||
2132 | max_user_taken_entries = num_segmaps - cnt - 40 - 1; | ||
2133 | |||
2134 | printk("SUN4C: %d mmu entries for the kernel\n", cnt); | ||
2135 | } | ||
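The zone setup above places all low memory in ZONE_DMA and reports whatever bootmem could not hand out as holes; a worked example of the arithmetic, with made-up numbers:

        /* Assume pfn_base = 0, max_low_pfn = 4096 (16MB of 4K pages), and
         * bootmem_init() returned pages_avail = 3900 usable pages.
         */
        unsigned long npages     = 4096 - 0;       /* max_low_pfn - pfn_base      */
        unsigned long zone_size  = npages;         /* zones_size[ZONE_DMA] = 4096 */
        unsigned long zone_holes = npages - 3900;  /* zholes_size[ZONE_DMA] = 196 */

The closing max_user_taken_entries computation works the same way: the locked entries, a 40-entry kernel reserve, and the one invalid segment are all held back from user allocation.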
2136 | |||
2137 | /* Load up routines and constants for sun4c mmu */ | ||
2138 | void __init ld_mmu_sun4c(void) | ||
2139 | { | ||
2140 | extern void ___xchg32_sun4c(void); | ||
2141 | |||
2142 | printk("Loading sun4c MMU routines\n"); | ||
2143 | |||
2144 | /* First the constants */ | ||
2145 | BTFIXUPSET_SIMM13(pgdir_shift, SUN4C_PGDIR_SHIFT); | ||
2146 | BTFIXUPSET_SETHI(pgdir_size, SUN4C_PGDIR_SIZE); | ||
2147 | BTFIXUPSET_SETHI(pgdir_mask, SUN4C_PGDIR_MASK); | ||
2148 | |||
2149 | BTFIXUPSET_SIMM13(ptrs_per_pmd, SUN4C_PTRS_PER_PMD); | ||
2150 | BTFIXUPSET_SIMM13(ptrs_per_pgd, SUN4C_PTRS_PER_PGD); | ||
2151 | BTFIXUPSET_SIMM13(user_ptrs_per_pgd, KERNBASE / SUN4C_PGDIR_SIZE); | ||
2152 | |||
2153 | BTFIXUPSET_INT(page_none, pgprot_val(SUN4C_PAGE_NONE)); | ||
2154 | BTFIXUPSET_INT(page_shared, pgprot_val(SUN4C_PAGE_SHARED)); | ||
2155 | BTFIXUPSET_INT(page_copy, pgprot_val(SUN4C_PAGE_COPY)); | ||
2156 | BTFIXUPSET_INT(page_readonly, pgprot_val(SUN4C_PAGE_READONLY)); | ||
2157 | BTFIXUPSET_INT(page_kernel, pgprot_val(SUN4C_PAGE_KERNEL)); | ||
2158 | page_kernel = pgprot_val(SUN4C_PAGE_KERNEL); | ||
2159 | pg_iobits = _SUN4C_PAGE_PRESENT | _SUN4C_READABLE | _SUN4C_WRITEABLE | | ||
2160 | _SUN4C_PAGE_IO | _SUN4C_PAGE_NOCACHE; | ||
2161 | |||
2162 | /* Functions */ | ||
2163 | BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4c, BTFIXUPCALL_NORM); | ||
2164 | BTFIXUPSET_CALL(do_check_pgt_cache, sun4c_check_pgt_cache, BTFIXUPCALL_NORM); | ||
2165 | |||
2166 | BTFIXUPSET_CALL(flush_cache_all, sun4c_flush_cache_all, BTFIXUPCALL_NORM); | ||
2167 | |||
2168 | if (sun4c_vacinfo.do_hwflushes) { | ||
2169 | BTFIXUPSET_CALL(sun4c_flush_page, sun4c_flush_page_hw, BTFIXUPCALL_NORM); | ||
2170 | BTFIXUPSET_CALL(sun4c_flush_segment, sun4c_flush_segment_hw, BTFIXUPCALL_NORM); | ||
2171 | BTFIXUPSET_CALL(sun4c_flush_context, sun4c_flush_context_hw, BTFIXUPCALL_NORM); | ||
2172 | } else { | ||
2173 | BTFIXUPSET_CALL(sun4c_flush_page, sun4c_flush_page_sw, BTFIXUPCALL_NORM); | ||
2174 | BTFIXUPSET_CALL(sun4c_flush_segment, sun4c_flush_segment_sw, BTFIXUPCALL_NORM); | ||
2175 | BTFIXUPSET_CALL(sun4c_flush_context, sun4c_flush_context_sw, BTFIXUPCALL_NORM); | ||
2176 | } | ||
2177 | |||
2178 | BTFIXUPSET_CALL(flush_tlb_mm, sun4c_flush_tlb_mm, BTFIXUPCALL_NORM); | ||
2179 | BTFIXUPSET_CALL(flush_cache_mm, sun4c_flush_cache_mm, BTFIXUPCALL_NORM); | ||
2180 | BTFIXUPSET_CALL(destroy_context, sun4c_destroy_context, BTFIXUPCALL_NORM); | ||
2181 | BTFIXUPSET_CALL(switch_mm, sun4c_switch_mm, BTFIXUPCALL_NORM); | ||
2182 | BTFIXUPSET_CALL(flush_cache_page, sun4c_flush_cache_page, BTFIXUPCALL_NORM); | ||
2183 | BTFIXUPSET_CALL(flush_tlb_page, sun4c_flush_tlb_page, BTFIXUPCALL_NORM); | ||
2184 | BTFIXUPSET_CALL(flush_tlb_range, sun4c_flush_tlb_range, BTFIXUPCALL_NORM); | ||
2185 | BTFIXUPSET_CALL(flush_cache_range, sun4c_flush_cache_range, BTFIXUPCALL_NORM); | ||
2186 | BTFIXUPSET_CALL(__flush_page_to_ram, sun4c_flush_page_to_ram, BTFIXUPCALL_NORM); | ||
2187 | BTFIXUPSET_CALL(flush_tlb_all, sun4c_flush_tlb_all, BTFIXUPCALL_NORM); | ||
2188 | |||
2189 | BTFIXUPSET_CALL(flush_sig_insns, sun4c_flush_sig_insns, BTFIXUPCALL_NOP); | ||
2190 | |||
2191 | BTFIXUPSET_CALL(set_pte, sun4c_set_pte, BTFIXUPCALL_STO1O0); | ||
2192 | |||
2193 | /* The 2.4.18 code does not set this on sun4c, how does it work? XXX */ | ||
2194 | /* BTFIXUPSET_SETHI(none_mask, 0x00000000); */ /* Defaults to zero? */ | ||
2195 | |||
2196 | BTFIXUPSET_CALL(pte_pfn, sun4c_pte_pfn, BTFIXUPCALL_NORM); | ||
2197 | #if 0 /* PAGE_SHIFT <= 12 */ /* Eek. Investigate. XXX */ | ||
2198 | BTFIXUPSET_CALL(pmd_page, sun4c_pmd_page, BTFIXUPCALL_ANDNINT(PAGE_SIZE - 1)); | ||
2199 | #else | ||
2200 | BTFIXUPSET_CALL(pmd_page, sun4c_pmd_page, BTFIXUPCALL_NORM); | ||
2201 | #endif | ||
2202 | BTFIXUPSET_CALL(pmd_set, sun4c_pmd_set, BTFIXUPCALL_NORM); | ||
2203 | BTFIXUPSET_CALL(pmd_populate, sun4c_pmd_populate, BTFIXUPCALL_NORM); | ||
2204 | |||
2205 | BTFIXUPSET_CALL(pte_present, sun4c_pte_present, BTFIXUPCALL_NORM); | ||
2206 | BTFIXUPSET_CALL(pte_clear, sun4c_pte_clear, BTFIXUPCALL_STG0O0); | ||
2207 | BTFIXUPSET_CALL(pte_read, sun4c_pte_read, BTFIXUPCALL_NORM); | ||
2208 | |||
2209 | BTFIXUPSET_CALL(pmd_bad, sun4c_pmd_bad, BTFIXUPCALL_NORM); | ||
2210 | BTFIXUPSET_CALL(pmd_present, sun4c_pmd_present, BTFIXUPCALL_NORM); | ||
2211 | BTFIXUPSET_CALL(pmd_clear, sun4c_pmd_clear, BTFIXUPCALL_STG0O0); | ||
2212 | |||
2213 | BTFIXUPSET_CALL(pgd_none, sun4c_pgd_none, BTFIXUPCALL_RETINT(0)); | ||
2214 | BTFIXUPSET_CALL(pgd_bad, sun4c_pgd_bad, BTFIXUPCALL_RETINT(0)); | ||
2215 | BTFIXUPSET_CALL(pgd_present, sun4c_pgd_present, BTFIXUPCALL_RETINT(1)); | ||
2216 | BTFIXUPSET_CALL(pgd_clear, sun4c_pgd_clear, BTFIXUPCALL_NOP); | ||
2217 | |||
2218 | BTFIXUPSET_CALL(mk_pte, sun4c_mk_pte, BTFIXUPCALL_NORM); | ||
2219 | BTFIXUPSET_CALL(mk_pte_phys, sun4c_mk_pte_phys, BTFIXUPCALL_NORM); | ||
2220 | BTFIXUPSET_CALL(mk_pte_io, sun4c_mk_pte_io, BTFIXUPCALL_NORM); | ||
2221 | |||
2222 | BTFIXUPSET_INT(pte_modify_mask, _SUN4C_PAGE_CHG_MASK); | ||
2223 | BTFIXUPSET_CALL(pmd_offset, sun4c_pmd_offset, BTFIXUPCALL_NORM); | ||
2224 | BTFIXUPSET_CALL(pte_offset_kernel, sun4c_pte_offset_kernel, BTFIXUPCALL_NORM); | ||
2225 | BTFIXUPSET_CALL(free_pte_fast, sun4c_free_pte_fast, BTFIXUPCALL_NORM); | ||
2226 | BTFIXUPSET_CALL(pte_free, sun4c_pte_free, BTFIXUPCALL_NORM); | ||
2227 | BTFIXUPSET_CALL(pte_alloc_one_kernel, sun4c_pte_alloc_one_kernel, BTFIXUPCALL_NORM); | ||
2228 | BTFIXUPSET_CALL(pte_alloc_one, sun4c_pte_alloc_one, BTFIXUPCALL_NORM); | ||
2229 | BTFIXUPSET_CALL(free_pmd_fast, sun4c_free_pmd_fast, BTFIXUPCALL_NOP); | ||
2230 | BTFIXUPSET_CALL(pmd_alloc_one, sun4c_pmd_alloc_one, BTFIXUPCALL_RETO0); | ||
2231 | BTFIXUPSET_CALL(free_pgd_fast, sun4c_free_pgd_fast, BTFIXUPCALL_NORM); | ||
2232 | BTFIXUPSET_CALL(get_pgd_fast, sun4c_get_pgd_fast, BTFIXUPCALL_NORM); | ||
2233 | |||
2234 | BTFIXUPSET_HALF(pte_writei, _SUN4C_PAGE_WRITE); | ||
2235 | BTFIXUPSET_HALF(pte_dirtyi, _SUN4C_PAGE_MODIFIED); | ||
2236 | BTFIXUPSET_HALF(pte_youngi, _SUN4C_PAGE_ACCESSED); | ||
2237 | BTFIXUPSET_HALF(pte_filei, _SUN4C_PAGE_FILE); | ||
2238 | BTFIXUPSET_HALF(pte_wrprotecti, _SUN4C_PAGE_WRITE|_SUN4C_PAGE_SILENT_WRITE); | ||
2239 | BTFIXUPSET_HALF(pte_mkcleani, _SUN4C_PAGE_MODIFIED|_SUN4C_PAGE_SILENT_WRITE); | ||
2240 | BTFIXUPSET_HALF(pte_mkoldi, _SUN4C_PAGE_ACCESSED|_SUN4C_PAGE_SILENT_READ); | ||
2241 | BTFIXUPSET_CALL(pte_mkwrite, sun4c_pte_mkwrite, BTFIXUPCALL_NORM); | ||
2242 | BTFIXUPSET_CALL(pte_mkdirty, sun4c_pte_mkdirty, BTFIXUPCALL_NORM); | ||
2243 | BTFIXUPSET_CALL(pte_mkyoung, sun4c_pte_mkyoung, BTFIXUPCALL_NORM); | ||
2244 | BTFIXUPSET_CALL(update_mmu_cache, sun4c_update_mmu_cache, BTFIXUPCALL_NORM); | ||
2245 | |||
2246 | BTFIXUPSET_CALL(pte_to_pgoff, sun4c_pte_to_pgoff, BTFIXUPCALL_NORM); | ||
2247 | BTFIXUPSET_CALL(pgoff_to_pte, sun4c_pgoff_to_pte, BTFIXUPCALL_NORM); | ||
2248 | |||
2249 | BTFIXUPSET_CALL(mmu_lockarea, sun4c_lockarea, BTFIXUPCALL_NORM); | ||
2250 | BTFIXUPSET_CALL(mmu_unlockarea, sun4c_unlockarea, BTFIXUPCALL_NORM); | ||
2251 | |||
2252 | BTFIXUPSET_CALL(mmu_get_scsi_one, sun4c_get_scsi_one, BTFIXUPCALL_NORM); | ||
2253 | BTFIXUPSET_CALL(mmu_get_scsi_sgl, sun4c_get_scsi_sgl, BTFIXUPCALL_NORM); | ||
2254 | BTFIXUPSET_CALL(mmu_release_scsi_one, sun4c_release_scsi_one, BTFIXUPCALL_NORM); | ||
2255 | BTFIXUPSET_CALL(mmu_release_scsi_sgl, sun4c_release_scsi_sgl, BTFIXUPCALL_NORM); | ||
2256 | |||
2257 | BTFIXUPSET_CALL(mmu_map_dma_area, sun4c_map_dma_area, BTFIXUPCALL_NORM); | ||
2258 | BTFIXUPSET_CALL(mmu_unmap_dma_area, sun4c_unmap_dma_area, BTFIXUPCALL_NORM); | ||
2259 | BTFIXUPSET_CALL(mmu_translate_dvma, sun4c_translate_dvma, BTFIXUPCALL_NORM); | ||
2260 | |||
2261 | BTFIXUPSET_CALL(sparc_mapiorange, sun4c_mapiorange, BTFIXUPCALL_NORM); | ||
2262 | BTFIXUPSET_CALL(sparc_unmapiorange, sun4c_unmapiorange, BTFIXUPCALL_NORM); | ||
2263 | |||
2264 | BTFIXUPSET_CALL(__swp_type, sun4c_swp_type, BTFIXUPCALL_NORM); | ||
2265 | BTFIXUPSET_CALL(__swp_offset, sun4c_swp_offset, BTFIXUPCALL_NORM); | ||
2266 | BTFIXUPSET_CALL(__swp_entry, sun4c_swp_entry, BTFIXUPCALL_NORM); | ||
2267 | |||
2268 | BTFIXUPSET_CALL(alloc_thread_info, sun4c_alloc_thread_info, BTFIXUPCALL_NORM); | ||
2269 | BTFIXUPSET_CALL(free_thread_info, sun4c_free_thread_info, BTFIXUPCALL_NORM); | ||
2270 | |||
2271 | BTFIXUPSET_CALL(mmu_info, sun4c_mmu_info, BTFIXUPCALL_NORM); | ||
2272 | |||
2273 | /* These should _never_ get called with two level tables. */ | ||
2274 | BTFIXUPSET_CALL(pgd_set, sun4c_pgd_set, BTFIXUPCALL_NOP); | ||
2275 | BTFIXUPSET_CALL(pgd_page, sun4c_pgd_page, BTFIXUPCALL_RETO0); | ||
2276 | } | ||
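Every BTFIXUPSET_CALL() in ld_mmu_sun4c() registers one MMU operation for the boot-time fixup pass. Functionally this is equivalent to filling in an ops table of function pointers, except that btfixup rewrites each call site in the kernel image, so the chosen routine is reached by a direct call with no runtime indirection. A sketch of the function-pointer equivalent, assuming the usual kernel types (the struct and its name are illustrative):

        struct mmu_ops {
                void   (*flush_cache_all)(void);
                void   (*flush_tlb_all)(void);
                pte_t *(*pte_alloc_one_kernel)(struct mm_struct *mm,
                                               unsigned long addr);
                /* ... one slot per BTFIXUPSET_CALL() above ... */
        };

        static struct mmu_ops sun4c_ops = {
                .flush_cache_all      = sun4c_flush_cache_all,
                .flush_tlb_all        = sun4c_flush_tlb_all,
                .pte_alloc_one_kernel = sun4c_pte_alloc_one_kernel,
        };

The non-NORM fixup types go further than any ops table can: BTFIXUPCALL_NOP patches the call site into a no-op, BTFIXUPCALL_RETINT(n) into a constant return, and BTFIXUPCALL_STG0O0 into a single store, so trivial operations cost no call at all.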
diff --git a/arch/sparc/mm/swift.S b/arch/sparc/mm/swift.S new file mode 100644 index 000000000000..2dcaa5ac1a38 --- /dev/null +++ b/arch/sparc/mm/swift.S | |||
@@ -0,0 +1,256 @@ | |||
1 | /* $Id: swift.S,v 1.9 2002/01/08 11:11:59 davem Exp $ | ||
2 | * swift.S: MicroSparc-II mmu/cache operations. | ||
3 | * | ||
4 | * Copyright (C) 1999 David S. Miller (davem@redhat.com) | ||
5 | */ | ||
6 | |||
7 | #include <linux/config.h> | ||
8 | #include <asm/psr.h> | ||
9 | #include <asm/asi.h> | ||
10 | #include <asm/page.h> | ||
11 | #include <asm/pgtsrmmu.h> | ||
12 | #include <asm/asm_offsets.h> | ||
13 | |||
14 | .text | ||
15 | .align 4 | ||
16 | |||
17 | #if 1 /* XXX screw this, I can't get the VAC flushes working | ||
18 | * XXX reliably... -DaveM | ||
19 | */ | ||
20 | .globl swift_flush_cache_all, swift_flush_cache_mm | ||
21 | .globl swift_flush_cache_range, swift_flush_cache_page | ||
22 | .globl swift_flush_page_for_dma | ||
23 | .globl swift_flush_page_to_ram | ||
24 | |||
25 | swift_flush_cache_all: | ||
26 | swift_flush_cache_mm: | ||
27 | swift_flush_cache_range: | ||
28 | swift_flush_cache_page: | ||
29 | swift_flush_page_for_dma: | ||
30 | swift_flush_page_to_ram: | ||
31 | sethi %hi(0x2000), %o0 | ||
32 | 1: subcc %o0, 0x10, %o0 | ||
33 | add %o0, %o0, %o1 | ||
34 | sta %g0, [%o0] ASI_M_DATAC_TAG | ||
35 | bne 1b | ||
36 | sta %g0, [%o1] ASI_M_TXTC_TAG | ||
37 | retl | ||
38 | nop | ||
39 | #else | ||
40 | |||
41 | .globl swift_flush_cache_all | ||
42 | swift_flush_cache_all: | ||
43 | WINDOW_FLUSH(%g4, %g5) | ||
44 | |||
45 | /* Just clear out all the tags. */ | ||
46 | sethi %hi(16 * 1024), %o0 | ||
47 | 1: subcc %o0, 16, %o0 | ||
48 | sta %g0, [%o0] ASI_M_TXTC_TAG | ||
49 | bne 1b | ||
50 | sta %g0, [%o0] ASI_M_DATAC_TAG | ||
51 | retl | ||
52 | nop | ||
53 | |||
54 | .globl swift_flush_cache_mm | ||
55 | swift_flush_cache_mm: | ||
56 | ld [%o0 + AOFF_mm_context], %g2 | ||
57 | cmp %g2, -1 | ||
58 | be swift_flush_cache_mm_out | ||
59 | WINDOW_FLUSH(%g4, %g5) | ||
60 | rd %psr, %g1 | ||
61 | andn %g1, PSR_ET, %g3 | ||
62 | wr %g3, 0x0, %psr | ||
63 | nop | ||
64 | nop | ||
65 | mov SRMMU_CTX_REG, %g7 | ||
66 | lda [%g7] ASI_M_MMUREGS, %g5 | ||
67 | sta %g2, [%g7] ASI_M_MMUREGS | ||
68 | |||
69 | #if 1 | ||
70 | sethi %hi(0x2000), %o0 | ||
71 | 1: subcc %o0, 0x10, %o0 | ||
72 | sta %g0, [%o0] ASI_M_FLUSH_CTX | ||
73 | bne 1b | ||
74 | nop | ||
75 | #else | ||
76 | clr %o0 | ||
77 | or %g0, 2048, %g7 | ||
78 | or %g0, 2048, %o1 | ||
79 | add %o1, 2048, %o2 | ||
80 | add %o2, 2048, %o3 | ||
81 | mov 16, %o4 | ||
82 | add %o4, 2048, %o5 | ||
83 | add %o5, 2048, %g2 | ||
84 | add %g2, 2048, %g3 | ||
85 | 1: sta %g0, [%o0 ] ASI_M_FLUSH_CTX | ||
86 | sta %g0, [%o0 + %o1] ASI_M_FLUSH_CTX | ||
87 | sta %g0, [%o0 + %o2] ASI_M_FLUSH_CTX | ||
88 | sta %g0, [%o0 + %o3] ASI_M_FLUSH_CTX | ||
89 | sta %g0, [%o0 + %o4] ASI_M_FLUSH_CTX | ||
90 | sta %g0, [%o0 + %o5] ASI_M_FLUSH_CTX | ||
91 | sta %g0, [%o0 + %g2] ASI_M_FLUSH_CTX | ||
92 | sta %g0, [%o0 + %g3] ASI_M_FLUSH_CTX | ||
93 | subcc %g7, 32, %g7 | ||
94 | bne 1b | ||
95 | add %o0, 32, %o0 | ||
96 | #endif | ||
97 | |||
98 | mov SRMMU_CTX_REG, %g7 | ||
99 | sta %g5, [%g7] ASI_M_MMUREGS | ||
100 | wr %g1, 0x0, %psr | ||
101 | nop | ||
102 | nop | ||
103 | swift_flush_cache_mm_out: | ||
104 | retl | ||
105 | nop | ||
106 | |||
107 | .globl swift_flush_cache_range | ||
108 | swift_flush_cache_range: | ||
109 | ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */ | ||
110 | sub %o2, %o1, %o2 | ||
111 | sethi %hi(4096), %o3 | ||
112 | cmp %o2, %o3 | ||
113 | bgu swift_flush_cache_mm | ||
114 | nop | ||
115 | b 70f | ||
116 | nop | ||
117 | |||
118 | .globl swift_flush_cache_page | ||
119 | swift_flush_cache_page: | ||
120 | ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */ | ||
121 | 70: | ||
122 | ld [%o0 + AOFF_mm_context], %g2 | ||
123 | cmp %g2, -1 | ||
124 | be swift_flush_cache_page_out | ||
125 | WINDOW_FLUSH(%g4, %g5) | ||
126 | rd %psr, %g1 | ||
127 | andn %g1, PSR_ET, %g3 | ||
128 | wr %g3, 0x0, %psr | ||
129 | nop | ||
130 | nop | ||
131 | mov SRMMU_CTX_REG, %g7 | ||
132 | lda [%g7] ASI_M_MMUREGS, %g5 | ||
133 | sta %g2, [%g7] ASI_M_MMUREGS | ||
134 | |||
135 | andn %o1, (PAGE_SIZE - 1), %o1 | ||
136 | #if 1 | ||
137 | sethi %hi(0x1000), %o0 | ||
138 | 1: subcc %o0, 0x10, %o0 | ||
139 | sta %g0, [%o1 + %o0] ASI_M_FLUSH_PAGE | ||
140 | bne 1b | ||
141 | nop | ||
142 | #else | ||
143 | or %g0, 512, %g7 | ||
144 | or %g0, 512, %o0 | ||
145 | add %o0, 512, %o2 | ||
146 | add %o2, 512, %o3 | ||
147 | add %o3, 512, %o4 | ||
148 | add %o4, 512, %o5 | ||
149 | add %o5, 512, %g3 | ||
150 | add %g3, 512, %g4 | ||
151 | 1: sta %g0, [%o1 ] ASI_M_FLUSH_PAGE | ||
152 | sta %g0, [%o1 + %o0] ASI_M_FLUSH_PAGE | ||
153 | sta %g0, [%o1 + %o2] ASI_M_FLUSH_PAGE | ||
154 | sta %g0, [%o1 + %o3] ASI_M_FLUSH_PAGE | ||
155 | sta %g0, [%o1 + %o4] ASI_M_FLUSH_PAGE | ||
156 | sta %g0, [%o1 + %o5] ASI_M_FLUSH_PAGE | ||
157 | sta %g0, [%o1 + %g3] ASI_M_FLUSH_PAGE | ||
158 | sta %g0, [%o1 + %g4] ASI_M_FLUSH_PAGE | ||
159 | subcc %g7, 16, %g7 | ||
160 | bne 1b | ||
161 | add %o1, 16, %o1 | ||
162 | #endif | ||
163 | |||
164 | mov SRMMU_CTX_REG, %g7 | ||
165 | sta %g5, [%g7] ASI_M_MMUREGS | ||
166 | wr %g1, 0x0, %psr | ||
167 | nop | ||
168 | nop | ||
169 | swift_flush_cache_page_out: | ||
170 | retl | ||
171 | nop | ||
172 | |||
173 | /* Swift is write-through, however it is not | ||
174 | * I/O nor TLB-walk coherent. Also it has | ||
175 | * caches which are virtually indexed and tagged. | ||
176 | */ | ||
177 | .globl swift_flush_page_for_dma | ||
178 | .globl swift_flush_page_to_ram | ||
179 | swift_flush_page_for_dma: | ||
180 | swift_flush_page_to_ram: | ||
181 | andn %o0, (PAGE_SIZE - 1), %o1 | ||
182 | #if 1 | ||
183 | sethi %hi(0x1000), %o0 | ||
184 | 1: subcc %o0, 0x10, %o0 | ||
185 | sta %g0, [%o1 + %o0] ASI_M_FLUSH_PAGE | ||
186 | bne 1b | ||
187 | nop | ||
188 | #else | ||
189 | or %g0, 512, %g7 | ||
190 | or %g0, 512, %o0 | ||
191 | add %o0, 512, %o2 | ||
192 | add %o2, 512, %o3 | ||
193 | add %o3, 512, %o4 | ||
194 | add %o4, 512, %o5 | ||
195 | add %o5, 512, %g3 | ||
196 | add %g3, 512, %g4 | ||
197 | 1: sta %g0, [%o1 ] ASI_M_FLUSH_PAGE | ||
198 | sta %g0, [%o1 + %o0] ASI_M_FLUSH_PAGE | ||
199 | sta %g0, [%o1 + %o2] ASI_M_FLUSH_PAGE | ||
200 | sta %g0, [%o1 + %o3] ASI_M_FLUSH_PAGE | ||
201 | sta %g0, [%o1 + %o4] ASI_M_FLUSH_PAGE | ||
202 | sta %g0, [%o1 + %o5] ASI_M_FLUSH_PAGE | ||
203 | sta %g0, [%o1 + %g3] ASI_M_FLUSH_PAGE | ||
204 | sta %g0, [%o1 + %g4] ASI_M_FLUSH_PAGE | ||
205 | subcc %g7, 16, %g7 | ||
206 | bne 1b | ||
207 | add %o1, 16, %o1 | ||
208 | #endif | ||
209 | retl | ||
210 | nop | ||
211 | #endif | ||
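Both implementations above reduce to the same idea: a diagnostic store of zero through the cache-tag ASIs invalidates one tag slot, and a loop over the tag space clears the whole cache. A C rendering of the loop from the #if 1 branch, offered as a sketch (sparc32 inline assembly, ASI_M_* constants as in <asm/asi.h>; the real routines stay in assembly for speed):

        static inline void swift_clear_tags_sketch(void)
        {
                unsigned long addr;

                /* data-cache tags at addr, instruction-cache tags at 2*addr,
                 * one 16-byte tag slot per iteration, as in the loop above */
                for (addr = 0; addr < 0x2000; addr += 0x10)
                        __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
                                             "sta %%g0, [%2] %3\n\t" : :
                                             "r" (addr),     "i" (ASI_M_DATAC_TAG),
                                             "r" (addr * 2), "i" (ASI_M_TXTC_TAG)
                                             : "memory");
        }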
212 | |||
213 | .globl swift_flush_sig_insns | ||
214 | swift_flush_sig_insns: | ||
215 | flush %o1 | ||
216 | retl | ||
217 | flush %o1 + 4 | ||
218 | |||
219 | .globl swift_flush_tlb_mm | ||
220 | .globl swift_flush_tlb_range | ||
221 | .globl swift_flush_tlb_all | ||
222 | swift_flush_tlb_range: | ||
223 | ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ | ||
224 | swift_flush_tlb_mm: | ||
225 | ld [%o0 + AOFF_mm_context], %g2 | ||
226 | cmp %g2, -1 | ||
227 | be swift_flush_tlb_all_out | ||
228 | swift_flush_tlb_all: | ||
229 | mov 0x400, %o1 | ||
230 | sta %g0, [%o1] ASI_M_FLUSH_PROBE | ||
231 | swift_flush_tlb_all_out: | ||
232 | retl | ||
233 | nop | ||
234 | |||
235 | .globl swift_flush_tlb_page | ||
236 | swift_flush_tlb_page: | ||
237 | ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ | ||
238 | mov SRMMU_CTX_REG, %g1 | ||
239 | ld [%o0 + AOFF_mm_context], %o3 | ||
240 | andn %o1, (PAGE_SIZE - 1), %o1 | ||
241 | cmp %o3, -1 | ||
242 | be swift_flush_tlb_page_out | ||
243 | nop | ||
244 | #if 1 | ||
245 | mov 0x400, %o1 | ||
246 | sta %g0, [%o1] ASI_M_FLUSH_PROBE | ||
247 | #else | ||
248 | lda [%g1] ASI_M_MMUREGS, %g5 | ||
249 | sta %o3, [%g1] ASI_M_MMUREGS | ||
250 | sta %g0, [%o1] ASI_M_FLUSH_PAGE /* rem. virt. cache. prot. */ | ||
251 | sta %g0, [%o1] ASI_M_FLUSH_PROBE | ||
252 | sta %g5, [%g1] ASI_M_MMUREGS | ||
253 | #endif | ||
254 | swift_flush_tlb_page_out: | ||
255 | retl | ||
256 | nop | ||
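The TLB side is simpler than the cache side: swift_flush_tlb_all performs the whole flush with a single store to address 0x400 in the flush/probe ASI, where bits [11:8] = 4 select the "flush entire" level. In C, this is essentially the whole-TLB flush helper the srmmu code in this tree uses:

        static inline void flush_whole_tlb_sketch(void)
        {
                __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
                                     "r" (0x400),            /* flush entire TLB */
                                     "i" (ASI_M_FLUSH_PROBE) : "memory");
        }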
diff --git a/arch/sparc/mm/tsunami.S b/arch/sparc/mm/tsunami.S new file mode 100644 index 000000000000..8acd1787fde2 --- /dev/null +++ b/arch/sparc/mm/tsunami.S | |||
@@ -0,0 +1,133 @@ | |||
1 | /* $Id: tsunami.S,v 1.7 2001/12/21 04:56:15 davem Exp $ | ||
2 | * tsunami.S: High speed MicroSparc-I mmu/cache operations. | ||
3 | * | ||
4 | * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) | ||
5 | */ | ||
6 | |||
7 | #include <linux/config.h> | ||
8 | #include <asm/ptrace.h> | ||
9 | #include <asm/asm_offsets.h> | ||
10 | #include <asm/psr.h> | ||
11 | #include <asm/asi.h> | ||
12 | #include <asm/page.h> | ||
13 | #include <asm/pgtsrmmu.h> | ||
14 | |||
15 | .text | ||
16 | .align 4 | ||
17 | |||
18 | .globl tsunami_flush_cache_all, tsunami_flush_cache_mm | ||
19 | .globl tsunami_flush_cache_range, tsunami_flush_cache_page | ||
20 | .globl tsunami_flush_page_to_ram, tsunami_flush_page_for_dma | ||
21 | .globl tsunami_flush_sig_insns | ||
22 | .globl tsunami_flush_tlb_all, tsunami_flush_tlb_mm | ||
23 | .globl tsunami_flush_tlb_range, tsunami_flush_tlb_page | ||
24 | |||
25 | /* Sliiick... */ | ||
26 | tsunami_flush_cache_page: | ||
27 | tsunami_flush_cache_range: | ||
28 | ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */ | ||
29 | tsunami_flush_cache_mm: | ||
30 | ld [%o0 + AOFF_mm_context], %g2 | ||
31 | cmp %g2, -1 | ||
32 | be tsunami_flush_cache_out | ||
33 | tsunami_flush_cache_all: | ||
34 | WINDOW_FLUSH(%g4, %g5) | ||
35 | tsunami_flush_page_for_dma: | ||
36 | sta %g0, [%g0] ASI_M_IC_FLCLEAR | ||
37 | sta %g0, [%g0] ASI_M_DC_FLCLEAR | ||
38 | tsunami_flush_cache_out: | ||
39 | tsunami_flush_page_to_ram: | ||
40 | retl | ||
41 | nop | ||
42 | |||
43 | tsunami_flush_sig_insns: | ||
44 | flush %o1 | ||
45 | retl | ||
46 | flush %o1 + 4 | ||
47 | |||
48 | /* More slick stuff... */ | ||
49 | tsunami_flush_tlb_range: | ||
50 | ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ | ||
51 | tsunami_flush_tlb_mm: | ||
52 | ld [%o0 + AOFF_mm_context], %g2 | ||
53 | cmp %g2, -1 | ||
54 | be tsunami_flush_tlb_out | ||
55 | tsunami_flush_tlb_all: | ||
56 | mov 0x400, %o1 | ||
57 | sta %g0, [%o1] ASI_M_FLUSH_PROBE | ||
58 | nop | ||
59 | nop | ||
60 | nop | ||
61 | nop | ||
62 | nop | ||
63 | tsunami_flush_tlb_out: | ||
64 | retl | ||
65 | nop | ||
66 | |||
67 | /* This one can be done in a fine grained manner... */ | ||
68 | tsunami_flush_tlb_page: | ||
69 | ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ | ||
70 | mov SRMMU_CTX_REG, %g1 | ||
71 | ld [%o0 + AOFF_mm_context], %o3 | ||
72 | andn %o1, (PAGE_SIZE - 1), %o1 | ||
73 | cmp %o3, -1 | ||
74 | be tsunami_flush_tlb_page_out | ||
75 | lda [%g1] ASI_M_MMUREGS, %g5 | ||
76 | sta %o3, [%g1] ASI_M_MMUREGS | ||
77 | sta %g0, [%o1] ASI_M_FLUSH_PROBE | ||
78 | nop | ||
79 | nop | ||
80 | nop | ||
81 | nop | ||
82 | nop | ||
83 | tsunami_flush_tlb_page_out: | ||
84 | retl | ||
85 | sta %g5, [%g1] ASI_M_MMUREGS | ||
86 | |||
87 | #define MIRROR_BLOCK(dst, src, offset, t0, t1, t2, t3) \ | ||
88 | ldd [src + offset + 0x18], t0; \ | ||
89 | std t0, [dst + offset + 0x18]; \ | ||
90 | ldd [src + offset + 0x10], t2; \ | ||
91 | std t2, [dst + offset + 0x10]; \ | ||
92 | ldd [src + offset + 0x08], t0; \ | ||
93 | std t0, [dst + offset + 0x08]; \ | ||
94 | ldd [src + offset + 0x00], t2; \ | ||
95 | std t2, [dst + offset + 0x00]; | ||
96 | |||
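MIRROR_BLOCK() copies 32 bytes per expansion as four doubleword (ldd/std) pairs, highest offset first; on sparc32 each ldd/std moves an even/odd register pair in a single access. The same thing in C, with uint64_t as the closest portable stand-in for a register pair:

        #include <stdint.h>

        static inline void mirror_block_sketch(uint64_t *dst, const uint64_t *src)
        {
                dst[3] = src[3];   /* offset 0x18 */
                dst[2] = src[2];   /* offset 0x10 */
                dst[1] = src[1];   /* offset 0x08 */
                dst[0] = src[0];   /* offset 0x00 */
        }

tsunami_copy_1page below expands the macro eight times per loop iteration, moving 256 bytes, and loops PAGE_SIZE >> 8 times to cover the page.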
97 | .globl tsunami_copy_1page | ||
98 | tsunami_copy_1page: | ||
99 | /* NOTE: This routine has to be shorter than 70 insns --jj */ | ||
100 | or %g0, (PAGE_SIZE >> 8), %g1 | ||
101 | 1: | ||
102 | MIRROR_BLOCK(%o0, %o1, 0x00, %o2, %o3, %o4, %o5) | ||
103 | MIRROR_BLOCK(%o0, %o1, 0x20, %o2, %o3, %o4, %o5) | ||
104 | MIRROR_BLOCK(%o0, %o1, 0x40, %o2, %o3, %o4, %o5) | ||
105 | MIRROR_BLOCK(%o0, %o1, 0x60, %o2, %o3, %o4, %o5) | ||
106 | MIRROR_BLOCK(%o0, %o1, 0x80, %o2, %o3, %o4, %o5) | ||
107 | MIRROR_BLOCK(%o0, %o1, 0xa0, %o2, %o3, %o4, %o5) | ||
108 | MIRROR_BLOCK(%o0, %o1, 0xc0, %o2, %o3, %o4, %o5) | ||
109 | MIRROR_BLOCK(%o0, %o1, 0xe0, %o2, %o3, %o4, %o5) | ||
110 | subcc %g1, 1, %g1 | ||
111 | add %o0, 0x100, %o0 | ||
112 | bne 1b | ||
113 | add %o1, 0x100, %o1 | ||
114 | |||
115 | .globl tsunami_setup_blockops | ||
116 | tsunami_setup_blockops: | ||
117 | sethi %hi(__copy_1page), %o0 | ||
118 | or %o0, %lo(__copy_1page), %o0 | ||
119 | sethi %hi(tsunami_copy_1page), %o1 | ||
120 | or %o1, %lo(tsunami_copy_1page), %o1 | ||
121 | sethi %hi(tsunami_setup_blockops), %o2 | ||
122 | or %o2, %lo(tsunami_setup_blockops), %o2 | ||
123 | ld [%o1], %o4 | ||
124 | 1: add %o1, 4, %o1 | ||
125 | st %o4, [%o0] | ||
126 | add %o0, 4, %o0 | ||
127 | cmp %o1, %o2 | ||
128 | bne 1b | ||
129 | ld [%o1], %o4 | ||
130 | sta %g0, [%g0] ASI_M_IC_FLCLEAR | ||
131 | sta %g0, [%g0] ASI_M_DC_FLCLEAR | ||
132 | retl | ||
133 | nop | ||
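tsunami_setup_blockops is runtime code patching: it copies the instructions of tsunami_copy_1page, one 32-bit word at a time, over the generic __copy_1page routine, then clears both caches so the CPU cannot keep fetching the old instructions. That patch-must-fit constraint is presumably what the 70-instruction note above is guarding. A C sketch of the same copy, using the real symbol names but a made-up helper:

        extern char __copy_1page[], tsunami_copy_1page[], tsunami_setup_blockops[];

        static void setup_blockops_sketch(void)
        {
                unsigned int *dst = (unsigned int *) __copy_1page;
                unsigned int *src = (unsigned int *) tsunami_copy_1page;
                unsigned int *end = (unsigned int *) tsunami_setup_blockops;

                while (src < end)       /* one instruction word at a time */
                        *dst++ = *src++;
                /* then clear I- and D-cache (ASI_M_IC_FLCLEAR / ASI_M_DC_FLCLEAR) */
        }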
diff --git a/arch/sparc/mm/viking.S b/arch/sparc/mm/viking.S new file mode 100644 index 000000000000..f58712d26bf5 --- /dev/null +++ b/arch/sparc/mm/viking.S | |||
@@ -0,0 +1,284 @@ | |||
1 | /* $Id: viking.S,v 1.19 2001/12/21 04:56:15 davem Exp $ | ||
2 | * viking.S: High speed Viking cache/mmu operations | ||
3 | * | ||
4 | * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be) | ||
5 | * Copyright (C) 1997,1998,1999 Jakub Jelinek (jj@ultra.linux.cz) | ||
6 | * Copyright (C) 1999 Pavel Semerad (semerad@ss1000.ms.mff.cuni.cz) | ||
7 | */ | ||
8 | |||
9 | #include <linux/config.h> | ||
10 | #include <asm/ptrace.h> | ||
11 | #include <asm/psr.h> | ||
12 | #include <asm/asm_offsets.h> | ||
13 | #include <asm/asi.h> | ||
14 | #include <asm/mxcc.h> | ||
15 | #include <asm/page.h> | ||
16 | #include <asm/pgtsrmmu.h> | ||
17 | #include <asm/viking.h> | ||
18 | #include <asm/btfixup.h> | ||
19 | |||
20 | #ifdef CONFIG_SMP | ||
21 | .data | ||
22 | .align 4 | ||
23 | sun4dsmp_flush_tlb_spin: | ||
24 | .word 0 | ||
25 | #endif | ||
26 | |||
27 | .text | ||
28 | .align 4 | ||
29 | |||
30 | .globl viking_flush_cache_all, viking_flush_cache_mm | ||
31 | .globl viking_flush_cache_range, viking_flush_cache_page | ||
32 | .globl viking_flush_page, viking_mxcc_flush_page | ||
33 | .globl viking_flush_page_for_dma, viking_flush_page_to_ram | ||
34 | .globl viking_flush_sig_insns | ||
35 | .globl viking_flush_tlb_all, viking_flush_tlb_mm | ||
36 | .globl viking_flush_tlb_range, viking_flush_tlb_page | ||
37 | |||
38 | viking_flush_page: | ||
39 | sethi %hi(PAGE_OFFSET), %g2 | ||
40 | sub %o0, %g2, %g3 | ||
41 | srl %g3, 12, %g1 ! ppage >> 12 | ||
42 | |||
43 | clr %o1 ! set counter, 0 - 127 | ||
44 | sethi %hi(PAGE_OFFSET + PAGE_SIZE - 0x80000000), %o3 | ||
45 | sethi %hi(0x80000000), %o4 | ||
46 | sethi %hi(VIKING_PTAG_VALID), %o5 | ||
47 | sethi %hi(2*PAGE_SIZE), %o0 | ||
48 | sethi %hi(PAGE_SIZE), %g7 | ||
49 | clr %o2 ! block counter, 0 - 3 | ||
50 | 5: | ||
51 | sll %o1, 5, %g4 | ||
52 | or %g4, %o4, %g4 ! 0x80000000 | (set << 5) | ||
53 | |||
54 | sll %o2, 26, %g5 ! block << 26 | ||
55 | 6: | ||
56 | or %g5, %g4, %g5 | ||
57 | ldda [%g5] ASI_M_DATAC_TAG, %g2 | ||
58 | cmp %g3, %g1 ! ptag == ppage? | ||
59 | bne 7f | ||
60 | inc %o2 | ||
61 | |||
62 | andcc %g2, %o5, %g0 ! ptag VALID? | ||
63 | be 7f | ||
64 | add %g4, %o3, %g2 ! (PAGE_OFFSET + PAGE_SIZE) | (set << 5) | ||
65 | ld [%g2], %g3 | ||
66 | ld [%g2 + %g7], %g3 | ||
67 | add %g2, %o0, %g2 | ||
68 | ld [%g2], %g3 | ||
69 | ld [%g2 + %g7], %g3 | ||
70 | add %g2, %o0, %g2 | ||
71 | ld [%g2], %g3 | ||
72 | ld [%g2 + %g7], %g3 | ||
73 | add %g2, %o0, %g2 | ||
74 | ld [%g2], %g3 | ||
75 | b 8f | ||
76 | ld [%g2 + %g7], %g3 | ||
77 | |||
78 | 7: | ||
79 | cmp %o2, 3 | ||
80 | ble 6b | ||
81 | sll %o2, 26, %g5 ! block << 26 | ||
82 | |||
83 | 8: inc %o1 | ||
84 | cmp %o1, 0x7f | ||
85 | ble 5b | ||
86 | clr %o2 | ||
87 | |||
88 | 9: retl | ||
89 | nop | ||
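viking_flush_page is a tag walk over the on-chip data cache: 128 sets times 4 blocks, reading each tag with ldda from ASI_M_DATAC_TAG and, where a valid tag matches the target page's physical tag, issuing the block of loads above to displace the line. A skeleton of the control flow in C; the helpers are hypothetical stand-ins for the ldda and ld sequences:

        unsigned long read_dcache_tag(int set, int block);  /* the ldda above   */
        unsigned long tag_to_ppage(unsigned long tag);      /* hypothetical     */
        void displace_set(int set);                         /* the 8 loads above */

        static void viking_flush_page_sketch(unsigned long ppage)
        {
                int set, block;

                for (set = 0; set < 128; set++)
                        for (block = 0; block < 4; block++) {
                                unsigned long tag = read_dcache_tag(set, block);

                                if ((tag & VIKING_PTAG_VALID) &&
                                    tag_to_ppage(tag) == ppage) {
                                        displace_set(set);
                                        break;              /* on to the next set */
                                }
                        }
        }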
90 | |||
91 | viking_mxcc_flush_page: | ||
92 | sethi %hi(PAGE_OFFSET), %g2 | ||
93 | sub %o0, %g2, %g3 | ||
94 | sub %g3, -PAGE_SIZE, %g3 ! ppage + PAGE_SIZE | ||
95 | sethi %hi(MXCC_SRCSTREAM), %o3 ! assume %hi(MXCC_SRCSTREAM) == %hi(MXCC_DESTSTREAM) | ||
96 | mov 0x10, %g2 ! set cacheable bit | ||
97 | or %o3, %lo(MXCC_SRCSTREAM), %o2 | ||
98 | or %o3, %lo(MXCC_DESSTREAM), %o3 | ||
99 | sub %g3, MXCC_STREAM_SIZE, %g3 | ||
100 | 6: | ||
101 | stda %g2, [%o2] ASI_M_MXCC | ||
102 | stda %g2, [%o3] ASI_M_MXCC | ||
103 | andncc %g3, PAGE_MASK, %g0 | ||
104 | bne 6b | ||
105 | sub %g3, MXCC_STREAM_SIZE, %g3 | ||
106 | |||
107 | 9: retl | ||
108 | nop | ||
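viking_mxcc_flush_page uses the MXCC's block-copy engine instead of a tag walk: writing the same physical block address (with the cacheable bit set in the upper word) to both the source and destination stream registers makes the MXCC copy each MXCC_STREAM_SIZE block of the page onto itself, flushing it through the cache controller. A sketch; mxcc_stream_store() is a hypothetical wrapper for the 64-bit stda to ASI_M_MXCC:

        void mxcc_stream_store(unsigned long reg, unsigned long long val); /* hypothetical */

        static void mxcc_flush_page_sketch(unsigned long paddr)
        {
                unsigned long long cacheable = 0x10ULL << 32;  /* cacheable bit, upper word */
                unsigned long off = PAGE_SIZE;

                while (off) {                       /* highest block first, as above */
                        off -= MXCC_STREAM_SIZE;
                        mxcc_stream_store(MXCC_SRCSTREAM, cacheable | (paddr + off));
                        mxcc_stream_store(MXCC_DESSTREAM, cacheable | (paddr + off));
                }
        }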
109 | |||
110 | viking_flush_cache_page: | ||
111 | viking_flush_cache_range: | ||
112 | #ifndef CONFIG_SMP | ||
113 | ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */ | ||
114 | #endif | ||
115 | viking_flush_cache_mm: | ||
116 | #ifndef CONFIG_SMP | ||
117 | ld [%o0 + AOFF_mm_context], %g1 | ||
118 | cmp %g1, -1 | ||
119 | bne viking_flush_cache_all | ||
120 | nop | ||
121 | b,a viking_flush_cache_out | ||
122 | #endif | ||
123 | viking_flush_cache_all: | ||
124 | WINDOW_FLUSH(%g4, %g5) | ||
125 | viking_flush_cache_out: | ||
126 | retl | ||
127 | nop | ||
128 | |||
129 | viking_flush_tlb_all: | ||
130 | mov 0x400, %g1 | ||
131 | retl | ||
132 | sta %g0, [%g1] ASI_M_FLUSH_PROBE | ||
133 | |||
134 | viking_flush_tlb_mm: | ||
135 | mov SRMMU_CTX_REG, %g1 | ||
136 | ld [%o0 + AOFF_mm_context], %o1 | ||
137 | lda [%g1] ASI_M_MMUREGS, %g5 | ||
138 | #ifndef CONFIG_SMP | ||
139 | cmp %o1, -1 | ||
140 | be 1f | ||
141 | #endif | ||
142 | mov 0x300, %g2 | ||
143 | sta %o1, [%g1] ASI_M_MMUREGS | ||
144 | sta %g0, [%g2] ASI_M_FLUSH_PROBE | ||
145 | retl | ||
146 | sta %g5, [%g1] ASI_M_MMUREGS | ||
147 | #ifndef CONFIG_SMP | ||
148 | 1: retl | ||
149 | nop | ||
150 | #endif | ||
151 | |||
152 | viking_flush_tlb_range: | ||
153 | ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ | ||
154 | mov SRMMU_CTX_REG, %g1 | ||
155 | ld [%o0 + AOFF_mm_context], %o3 | ||
156 | lda [%g1] ASI_M_MMUREGS, %g5 | ||
157 | #ifndef CONFIG_SMP | ||
158 | cmp %o3, -1 | ||
159 | be 2f | ||
160 | #endif | ||
161 | sethi %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4 | ||
162 | sta %o3, [%g1] ASI_M_MMUREGS | ||
163 | and %o1, %o4, %o1 | ||
164 | add %o1, 0x200, %o1 | ||
165 | sta %g0, [%o1] ASI_M_FLUSH_PROBE | ||
166 | 1: sub %o1, %o4, %o1 | ||
167 | cmp %o1, %o2 | ||
168 | blu,a 1b | ||
169 | sta %g0, [%o1] ASI_M_FLUSH_PROBE | ||
170 | retl | ||
171 | sta %g5, [%g1] ASI_M_MMUREGS | ||
172 | #ifndef CONFIG_SMP | ||
173 | 2: retl | ||
174 | nop | ||
175 | #endif | ||
176 | |||
177 | viking_flush_tlb_page: | ||
178 | ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ | ||
179 | mov SRMMU_CTX_REG, %g1 | ||
180 | ld [%o0 + AOFF_mm_context], %o3 | ||
181 | lda [%g1] ASI_M_MMUREGS, %g5 | ||
182 | #ifndef CONFIG_SMP | ||
183 | cmp %o3, -1 | ||
184 | be 1f | ||
185 | #endif | ||
186 | and %o1, PAGE_MASK, %o1 | ||
187 | sta %o3, [%g1] ASI_M_MMUREGS | ||
188 | sta %g0, [%o1] ASI_M_FLUSH_PROBE | ||
189 | retl | ||
190 | sta %g5, [%g1] ASI_M_MMUREGS | ||
191 | #ifndef CONFIG_SMP | ||
192 | 1: retl | ||
193 | nop | ||
194 | #endif | ||
195 | |||
196 | viking_flush_page_to_ram: | ||
197 | viking_flush_page_for_dma: | ||
198 | viking_flush_sig_insns: | ||
199 | retl | ||
200 | nop | ||
201 | |||
202 | #ifdef CONFIG_SMP | ||
203 | .globl sun4dsmp_flush_tlb_all, sun4dsmp_flush_tlb_mm | ||
204 | .globl sun4dsmp_flush_tlb_range, sun4dsmp_flush_tlb_page | ||
205 | sun4dsmp_flush_tlb_all: | ||
206 | sethi %hi(sun4dsmp_flush_tlb_spin), %g3 | ||
207 | 1: ldstub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5 | ||
208 | tst %g5 | ||
209 | bne 2f | ||
210 | mov 0x400, %g1 | ||
211 | sta %g0, [%g1] ASI_M_FLUSH_PROBE | ||
212 | retl | ||
213 | stb %g0, [%g3 + %lo(sun4dsmp_flush_tlb_spin)] | ||
214 | 2: tst %g5 | ||
215 | bne,a 2b | ||
216 | ldub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5 | ||
217 | b,a 1b | ||
218 | |||
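On sun4d SMP every one of these TLB flushes funnels through a single byte lock, sun4dsmp_flush_tlb_spin, taken with ldstub (atomically load a byte and set it to 0xff). Contending CPUs spin on plain ldub reads and only retry the atomic once the byte reads back clear: the classic test-and-test-and-set, which avoids write traffic on the bus while spinning. The same discipline in C, with GCC's __atomic builtins standing in for ldstub:

        static unsigned char tlb_lock;   /* 0 = free, 0xff = held */

        static void tlb_lock_acquire(void)
        {
                while (__atomic_exchange_n(&tlb_lock, 0xff, __ATOMIC_ACQUIRE))
                        while (__atomic_load_n(&tlb_lock, __ATOMIC_RELAXED))
                                ;       /* spin on reads until it looks free */
        }

        static void tlb_lock_release(void)
        {
                __atomic_store_n(&tlb_lock, 0, __ATOMIC_RELEASE);
        }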
219 | sun4dsmp_flush_tlb_mm: | ||
220 | sethi %hi(sun4dsmp_flush_tlb_spin), %g3 | ||
221 | 1: ldstub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5 | ||
222 | tst %g5 | ||
223 | bne 2f | ||
224 | mov SRMMU_CTX_REG, %g1 | ||
225 | ld [%o0 + AOFF_mm_context], %o1 | ||
226 | lda [%g1] ASI_M_MMUREGS, %g5 | ||
227 | mov 0x300, %g2 | ||
228 | sta %o1, [%g1] ASI_M_MMUREGS | ||
229 | sta %g0, [%g2] ASI_M_FLUSH_PROBE | ||
230 | sta %g5, [%g1] ASI_M_MMUREGS | ||
231 | retl | ||
232 | stb %g0, [%g3 + %lo(sun4dsmp_flush_tlb_spin)] | ||
233 | 2: tst %g5 | ||
234 | bne,a 2b | ||
235 | ldub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5 | ||
236 | b,a 1b | ||
237 | |||
238 | sun4dsmp_flush_tlb_range: | ||
239 | sethi %hi(sun4dsmp_flush_tlb_spin), %g3 | ||
240 | 1: ldstub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5 | ||
241 | tst %g5 | ||
242 | bne 3f | ||
243 | mov SRMMU_CTX_REG, %g1 | ||
244 | ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ | ||
245 | ld [%o0 + AOFF_mm_context], %o3 | ||
246 | lda [%g1] ASI_M_MMUREGS, %g5 | ||
247 | sethi %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4 | ||
248 | sta %o3, [%g1] ASI_M_MMUREGS | ||
249 | and %o1, %o4, %o1 | ||
250 | add %o1, 0x200, %o1 | ||
251 | sta %g0, [%o1] ASI_M_FLUSH_PROBE | ||
252 | 2: sub %o1, %o4, %o1 | ||
253 | cmp %o1, %o2 | ||
254 | blu,a 2b | ||
255 | sta %g0, [%o1] ASI_M_FLUSH_PROBE | ||
256 | sta %g5, [%g1] ASI_M_MMUREGS | ||
257 | retl | ||
258 | stb %g0, [%g3 + %lo(sun4dsmp_flush_tlb_spin)] | ||
259 | 3: tst %g5 | ||
260 | bne,a 3b | ||
261 | ldub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5 | ||
262 | b,a 1b | ||
263 | |||
264 | sun4dsmp_flush_tlb_page: | ||
265 | sethi %hi(sun4dsmp_flush_tlb_spin), %g3 | ||
266 | 1: ldstub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5 | ||
267 | tst %g5 | ||
268 | bne 2f | ||
269 | mov SRMMU_CTX_REG, %g1 | ||
270 | ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ | ||
271 | ld [%o0 + AOFF_mm_context], %o3 | ||
272 | lda [%g1] ASI_M_MMUREGS, %g5 | ||
273 | and %o1, PAGE_MASK, %o1 | ||
274 | sta %o3, [%g1] ASI_M_MMUREGS | ||
275 | sta %g0, [%o1] ASI_M_FLUSH_PROBE | ||
276 | sta %g5, [%g1] ASI_M_MMUREGS | ||
277 | retl | ||
278 | stb %g0, [%g3 + %lo(sun4dsmp_flush_tlb_spin)] | ||
279 | 2: tst %g5 | ||
280 | bne,a 2b | ||
281 | ldub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5 | ||
282 | b,a 1b | ||
283 | nop | ||
284 | #endif | ||