author		Sam Ravnborg <sam@ravnborg.org>	2008-11-16 23:08:45 -0500
committer	David S. Miller <davem@davemloft.net>	2008-12-04 12:16:59 -0500
commit		27137e5285a3388e8f86d7bc5fe0ed8b92bd4624 (patch)
tree		70cd698fb5561743913b5f7615f61df6e8883537 /arch/sparc
parent		c37ddd936d96b46cf2bb17e7b1a18b2bd24ec1fb (diff)
sparc,sparc64: unify mm/
- move all sparc64/mm/ files to arch/sparc/mm/
- commonly named files are named _64.c
- add files to sparc/mm/Makefile preserving link order
- delete now unused sparc64/mm/Makefile
- sparc64 now finds mm/ in sparc
Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc')
-rw-r--r-- | arch/sparc/mm/Makefile      |    6
-rw-r--r-- | arch/sparc/mm/fault_64.c    |  440
-rw-r--r-- | arch/sparc/mm/generic_64.c  |  163
-rw-r--r-- | arch/sparc/mm/hugetlbpage.c |  357
-rw-r--r-- | arch/sparc/mm/init_64.c     | 2360
-rw-r--r-- | arch/sparc/mm/init_64.h     |   49
-rw-r--r-- | arch/sparc/mm/tlb.c         |   97
-rw-r--r-- | arch/sparc/mm/tsb.c         |  496
-rw-r--r-- | arch/sparc/mm/ultra.S       |  767
9 files changed, 4734 insertions, 1 deletion
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
index 3ad1b1f9953e..681abe0a4594 100644
--- a/arch/sparc/mm/Makefile
+++ b/arch/sparc/mm/Makefile
@@ -4,13 +4,17 @@
 asflags-y := -ansi
 ccflags-y := -Werror
 
-obj-y := fault_$(BITS).o
+obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
+obj-y += fault_$(BITS).o
 obj-y += init_$(BITS).o
 obj-$(CONFIG_SPARC32) += loadmmu.o
 obj-y += generic_$(BITS).o
 obj-$(CONFIG_SPARC32) += extable.o btfixup.o srmmu.o iommu.o io-unit.o
 obj-$(CONFIG_SPARC32) += hypersparc.o viking.o tsunami.o swift.o
 
+# Only used by sparc64
+obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+
 # Only used by sparc32
 obj-$(CONFIG_HIGHMEM) += highmem.o
 
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
new file mode 100644
index 000000000000..a9e474bf6385
--- /dev/null
+++ b/arch/sparc/mm/fault_64.c
@@ -0,0 +1,440 @@
1 | /* | ||
2 | * arch/sparc64/mm/fault.c: Page fault handlers for the 64-bit Sparc. | ||
3 | * | ||
4 | * Copyright (C) 1996, 2008 David S. Miller (davem@davemloft.net) | ||
5 | * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz) | ||
6 | */ | ||
7 | |||
8 | #include <asm/head.h> | ||
9 | |||
10 | #include <linux/string.h> | ||
11 | #include <linux/types.h> | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/ptrace.h> | ||
14 | #include <linux/mman.h> | ||
15 | #include <linux/signal.h> | ||
16 | #include <linux/mm.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/interrupt.h> | ||
20 | #include <linux/kprobes.h> | ||
21 | #include <linux/kdebug.h> | ||
22 | |||
23 | #include <asm/page.h> | ||
24 | #include <asm/pgtable.h> | ||
25 | #include <asm/openprom.h> | ||
26 | #include <asm/oplib.h> | ||
27 | #include <asm/uaccess.h> | ||
28 | #include <asm/asi.h> | ||
29 | #include <asm/lsu.h> | ||
30 | #include <asm/sections.h> | ||
31 | #include <asm/mmu_context.h> | ||
32 | |||
33 | #ifdef CONFIG_KPROBES | ||
34 | static inline int notify_page_fault(struct pt_regs *regs) | ||
35 | { | ||
36 | int ret = 0; | ||
37 | |||
38 | /* kprobe_running() needs smp_processor_id() */ | ||
39 | if (!user_mode(regs)) { | ||
40 | preempt_disable(); | ||
41 | if (kprobe_running() && kprobe_fault_handler(regs, 0)) | ||
42 | ret = 1; | ||
43 | preempt_enable(); | ||
44 | } | ||
45 | return ret; | ||
46 | } | ||
47 | #else | ||
48 | static inline int notify_page_fault(struct pt_regs *regs) | ||
49 | { | ||
50 | return 0; | ||
51 | } | ||
52 | #endif | ||
53 | |||
54 | static void __kprobes unhandled_fault(unsigned long address, | ||
55 | struct task_struct *tsk, | ||
56 | struct pt_regs *regs) | ||
57 | { | ||
58 | if ((unsigned long) address < PAGE_SIZE) { | ||
59 | printk(KERN_ALERT "Unable to handle kernel NULL " | ||
60 | "pointer dereference\n"); | ||
61 | } else { | ||
62 | printk(KERN_ALERT "Unable to handle kernel paging request " | ||
63 | "at virtual address %016lx\n", (unsigned long)address); | ||
64 | } | ||
65 | printk(KERN_ALERT "tsk->{mm,active_mm}->context = %016lx\n", | ||
66 | (tsk->mm ? | ||
67 | CTX_HWBITS(tsk->mm->context) : | ||
68 | CTX_HWBITS(tsk->active_mm->context))); | ||
69 | printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %016lx\n", | ||
70 | (tsk->mm ? (unsigned long) tsk->mm->pgd : | ||
71 | (unsigned long) tsk->active_mm->pgd)); | ||
72 | die_if_kernel("Oops", regs); | ||
73 | } | ||
74 | |||
75 | static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr) | ||
76 | { | ||
77 | printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n", | ||
78 | regs->tpc); | ||
79 | printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]); | ||
80 | printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]); | ||
81 | printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr); | ||
82 | dump_stack(); | ||
83 | unhandled_fault(regs->tpc, current, regs); | ||
84 | } | ||
85 | |||
86 | /* | ||
87 | * We now make sure that mmap_sem is held in all paths that call | ||
88 | * this. Additionally, to prevent kswapd from ripping ptes from | ||
89 | * under us, disable interrupts around the time that we look at the | ||
90 | * pte, kswapd will have to wait to get his smp ipi response from | ||
91 | * us. vmtruncate likewise. This saves us having to get pte lock. | ||
92 | */ | ||
93 | static unsigned int get_user_insn(unsigned long tpc) | ||
94 | { | ||
95 | pgd_t *pgdp = pgd_offset(current->mm, tpc); | ||
96 | pud_t *pudp; | ||
97 | pmd_t *pmdp; | ||
98 | pte_t *ptep, pte; | ||
99 | unsigned long pa; | ||
100 | u32 insn = 0; | ||
101 | unsigned long pstate; | ||
102 | |||
103 | if (pgd_none(*pgdp)) | ||
104 | goto outret; | ||
105 | pudp = pud_offset(pgdp, tpc); | ||
106 | if (pud_none(*pudp)) | ||
107 | goto outret; | ||
108 | pmdp = pmd_offset(pudp, tpc); | ||
109 | if (pmd_none(*pmdp)) | ||
110 | goto outret; | ||
111 | |||
112 | /* This disables preemption for us as well. */ | ||
113 | __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate)); | ||
114 | __asm__ __volatile__("wrpr %0, %1, %%pstate" | ||
115 | : : "r" (pstate), "i" (PSTATE_IE)); | ||
116 | ptep = pte_offset_map(pmdp, tpc); | ||
117 | pte = *ptep; | ||
118 | if (!pte_present(pte)) | ||
119 | goto out; | ||
120 | |||
121 | pa = (pte_pfn(pte) << PAGE_SHIFT); | ||
122 | pa += (tpc & ~PAGE_MASK); | ||
123 | |||
124 | /* Use phys bypass so we don't pollute dtlb/dcache. */ | ||
125 | __asm__ __volatile__("lduwa [%1] %2, %0" | ||
126 | : "=r" (insn) | ||
127 | : "r" (pa), "i" (ASI_PHYS_USE_EC)); | ||
128 | |||
129 | out: | ||
130 | pte_unmap(ptep); | ||
131 | __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate)); | ||
132 | outret: | ||
133 | return insn; | ||
134 | } | ||
135 | |||
136 | extern unsigned long compute_effective_address(struct pt_regs *, unsigned int, unsigned int); | ||
137 | |||
138 | static void do_fault_siginfo(int code, int sig, struct pt_regs *regs, | ||
139 | unsigned int insn, int fault_code) | ||
140 | { | ||
141 | siginfo_t info; | ||
142 | |||
143 | info.si_code = code; | ||
144 | info.si_signo = sig; | ||
145 | info.si_errno = 0; | ||
146 | if (fault_code & FAULT_CODE_ITLB) | ||
147 | info.si_addr = (void __user *) regs->tpc; | ||
148 | else | ||
149 | info.si_addr = (void __user *) | ||
150 | compute_effective_address(regs, insn, 0); | ||
151 | info.si_trapno = 0; | ||
152 | force_sig_info(sig, &info, current); | ||
153 | } | ||
154 | |||
155 | extern int handle_ldf_stq(u32, struct pt_regs *); | ||
156 | extern int handle_ld_nf(u32, struct pt_regs *); | ||
157 | |||
158 | static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn) | ||
159 | { | ||
160 | if (!insn) { | ||
161 | if (!regs->tpc || (regs->tpc & 0x3)) | ||
162 | return 0; | ||
163 | if (regs->tstate & TSTATE_PRIV) { | ||
164 | insn = *(unsigned int *) regs->tpc; | ||
165 | } else { | ||
166 | insn = get_user_insn(regs->tpc); | ||
167 | } | ||
168 | } | ||
169 | return insn; | ||
170 | } | ||
171 | |||
172 | static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code, | ||
173 | unsigned int insn, unsigned long address) | ||
174 | { | ||
175 | unsigned char asi = ASI_P; | ||
176 | |||
177 | if ((!insn) && (regs->tstate & TSTATE_PRIV)) | ||
178 | goto cannot_handle; | ||
179 | |||
180 | /* If the user insn could not be read (thus insn is zero), that | ||
181 | * is fine. We will just gun down the process with a signal | ||
182 | * in that case. | ||
183 | */ | ||
184 | |||
185 | if (!(fault_code & (FAULT_CODE_WRITE|FAULT_CODE_ITLB)) && | ||
186 | (insn & 0xc0800000) == 0xc0800000) { | ||
187 | if (insn & 0x2000) | ||
188 | asi = (regs->tstate >> 24); | ||
189 | else | ||
190 | asi = (insn >> 5); | ||
191 | if ((asi & 0xf2) == 0x82) { | ||
192 | if (insn & 0x1000000) { | ||
193 | handle_ldf_stq(insn, regs); | ||
194 | } else { | ||
195 | /* This was a non-faulting load. Just clear the | ||
196 | * destination register(s) and continue with the next | ||
197 | * instruction. -jj | ||
198 | */ | ||
199 | handle_ld_nf(insn, regs); | ||
200 | } | ||
201 | return; | ||
202 | } | ||
203 | } | ||
204 | |||
205 | /* Is this in ex_table? */ | ||
206 | if (regs->tstate & TSTATE_PRIV) { | ||
207 | const struct exception_table_entry *entry; | ||
208 | |||
209 | entry = search_exception_tables(regs->tpc); | ||
210 | if (entry) { | ||
211 | regs->tpc = entry->fixup; | ||
212 | regs->tnpc = regs->tpc + 4; | ||
213 | return; | ||
214 | } | ||
215 | } else { | ||
216 | /* The si_code was set to make clear whether | ||
217 | * this was a SEGV_MAPERR or SEGV_ACCERR fault. | ||
218 | */ | ||
219 | do_fault_siginfo(si_code, SIGSEGV, regs, insn, fault_code); | ||
220 | return; | ||
221 | } | ||
222 | |||
223 | cannot_handle: | ||
224 | unhandled_fault (address, current, regs); | ||
225 | } | ||
226 | |||
227 | asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) | ||
228 | { | ||
229 | struct mm_struct *mm = current->mm; | ||
230 | struct vm_area_struct *vma; | ||
231 | unsigned int insn = 0; | ||
232 | int si_code, fault_code, fault; | ||
233 | unsigned long address, mm_rss; | ||
234 | |||
235 | fault_code = get_thread_fault_code(); | ||
236 | |||
237 | if (notify_page_fault(regs)) | ||
238 | return; | ||
239 | |||
240 | si_code = SEGV_MAPERR; | ||
241 | address = current_thread_info()->fault_address; | ||
242 | |||
243 | if ((fault_code & FAULT_CODE_ITLB) && | ||
244 | (fault_code & FAULT_CODE_DTLB)) | ||
245 | BUG(); | ||
246 | |||
247 | if (regs->tstate & TSTATE_PRIV) { | ||
248 | unsigned long tpc = regs->tpc; | ||
249 | |||
250 | /* Sanity check the PC. */ | ||
251 | if ((tpc >= KERNBASE && tpc < (unsigned long) __init_end) || | ||
252 | (tpc >= MODULES_VADDR && tpc < MODULES_END)) { | ||
253 | /* Valid, no problems... */ | ||
254 | } else { | ||
255 | bad_kernel_pc(regs, address); | ||
256 | return; | ||
257 | } | ||
258 | } | ||
259 | |||
260 | /* | ||
261 | * If we're in an interrupt or have no user | ||
262 | * context, we must not take the fault.. | ||
263 | */ | ||
264 | if (in_atomic() || !mm) | ||
265 | goto intr_or_no_mm; | ||
266 | |||
267 | if (test_thread_flag(TIF_32BIT)) { | ||
268 | if (!(regs->tstate & TSTATE_PRIV)) | ||
269 | regs->tpc &= 0xffffffff; | ||
270 | address &= 0xffffffff; | ||
271 | } | ||
272 | |||
273 | if (!down_read_trylock(&mm->mmap_sem)) { | ||
274 | if ((regs->tstate & TSTATE_PRIV) && | ||
275 | !search_exception_tables(regs->tpc)) { | ||
276 | insn = get_fault_insn(regs, insn); | ||
277 | goto handle_kernel_fault; | ||
278 | } | ||
279 | down_read(&mm->mmap_sem); | ||
280 | } | ||
281 | |||
282 | vma = find_vma(mm, address); | ||
283 | if (!vma) | ||
284 | goto bad_area; | ||
285 | |||
286 | /* Pure DTLB misses do not tell us whether the fault causing | ||
287 | * load/store/atomic was a write or not, it only says that there | ||
288 | * was no match. So in such a case we (carefully) read the | ||
289 | * instruction to try and figure this out. It's an optimization | ||
290 | * so it's ok if we can't do this. | ||
291 | * | ||
292 | * Special hack, window spill/fill knows the exact fault type. | ||
293 | */ | ||
294 | if (((fault_code & | ||
295 | (FAULT_CODE_DTLB | FAULT_CODE_WRITE | FAULT_CODE_WINFIXUP)) == FAULT_CODE_DTLB) && | ||
296 | (vma->vm_flags & VM_WRITE) != 0) { | ||
297 | insn = get_fault_insn(regs, 0); | ||
298 | if (!insn) | ||
299 | goto continue_fault; | ||
300 | /* All loads, stores and atomics have bits 30 and 31 both set | ||
301 | * in the instruction. Bit 21 is set in all stores, but we | ||
302 | * have to avoid prefetches which also have bit 21 set. | ||
303 | */ | ||
304 | if ((insn & 0xc0200000) == 0xc0200000 && | ||
305 | (insn & 0x01780000) != 0x01680000) { | ||
306 | /* Don't bother updating thread struct value, | ||
307 | * because update_mmu_cache only cares which tlb | ||
308 | * the access came from. | ||
309 | */ | ||
310 | fault_code |= FAULT_CODE_WRITE; | ||
311 | } | ||
312 | } | ||
313 | continue_fault: | ||
314 | |||
315 | if (vma->vm_start <= address) | ||
316 | goto good_area; | ||
317 | if (!(vma->vm_flags & VM_GROWSDOWN)) | ||
318 | goto bad_area; | ||
319 | if (!(fault_code & FAULT_CODE_WRITE)) { | ||
320 | /* Non-faulting loads shouldn't expand stack. */ | ||
321 | insn = get_fault_insn(regs, insn); | ||
322 | if ((insn & 0xc0800000) == 0xc0800000) { | ||
323 | unsigned char asi; | ||
324 | |||
325 | if (insn & 0x2000) | ||
326 | asi = (regs->tstate >> 24); | ||
327 | else | ||
328 | asi = (insn >> 5); | ||
329 | if ((asi & 0xf2) == 0x82) | ||
330 | goto bad_area; | ||
331 | } | ||
332 | } | ||
333 | if (expand_stack(vma, address)) | ||
334 | goto bad_area; | ||
335 | /* | ||
336 | * Ok, we have a good vm_area for this memory access, so | ||
337 | * we can handle it.. | ||
338 | */ | ||
339 | good_area: | ||
340 | si_code = SEGV_ACCERR; | ||
341 | |||
342 | /* If we took a ITLB miss on a non-executable page, catch | ||
343 | * that here. | ||
344 | */ | ||
345 | if ((fault_code & FAULT_CODE_ITLB) && !(vma->vm_flags & VM_EXEC)) { | ||
346 | BUG_ON(address != regs->tpc); | ||
347 | BUG_ON(regs->tstate & TSTATE_PRIV); | ||
348 | goto bad_area; | ||
349 | } | ||
350 | |||
351 | if (fault_code & FAULT_CODE_WRITE) { | ||
352 | if (!(vma->vm_flags & VM_WRITE)) | ||
353 | goto bad_area; | ||
354 | |||
355 | /* Spitfire has an icache which does not snoop | ||
356 | * processor stores. Later processors do... | ||
357 | */ | ||
358 | if (tlb_type == spitfire && | ||
359 | (vma->vm_flags & VM_EXEC) != 0 && | ||
360 | vma->vm_file != NULL) | ||
361 | set_thread_fault_code(fault_code | | ||
362 | FAULT_CODE_BLKCOMMIT); | ||
363 | } else { | ||
364 | /* Allow reads even for write-only mappings */ | ||
365 | if (!(vma->vm_flags & (VM_READ | VM_EXEC))) | ||
366 | goto bad_area; | ||
367 | } | ||
368 | |||
369 | fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE)); | ||
370 | if (unlikely(fault & VM_FAULT_ERROR)) { | ||
371 | if (fault & VM_FAULT_OOM) | ||
372 | goto out_of_memory; | ||
373 | else if (fault & VM_FAULT_SIGBUS) | ||
374 | goto do_sigbus; | ||
375 | BUG(); | ||
376 | } | ||
377 | if (fault & VM_FAULT_MAJOR) | ||
378 | current->maj_flt++; | ||
379 | else | ||
380 | current->min_flt++; | ||
381 | |||
382 | up_read(&mm->mmap_sem); | ||
383 | |||
384 | mm_rss = get_mm_rss(mm); | ||
385 | #ifdef CONFIG_HUGETLB_PAGE | ||
386 | mm_rss -= (mm->context.huge_pte_count * (HPAGE_SIZE / PAGE_SIZE)); | ||
387 | #endif | ||
388 | if (unlikely(mm_rss > | ||
389 | mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit)) | ||
390 | tsb_grow(mm, MM_TSB_BASE, mm_rss); | ||
391 | #ifdef CONFIG_HUGETLB_PAGE | ||
392 | mm_rss = mm->context.huge_pte_count; | ||
393 | if (unlikely(mm_rss > | ||
394 | mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) | ||
395 | tsb_grow(mm, MM_TSB_HUGE, mm_rss); | ||
396 | #endif | ||
397 | return; | ||
398 | |||
399 | /* | ||
400 | * Something tried to access memory that isn't in our memory map.. | ||
401 | * Fix it, but check if it's kernel or user first.. | ||
402 | */ | ||
403 | bad_area: | ||
404 | insn = get_fault_insn(regs, insn); | ||
405 | up_read(&mm->mmap_sem); | ||
406 | |||
407 | handle_kernel_fault: | ||
408 | do_kernel_fault(regs, si_code, fault_code, insn, address); | ||
409 | return; | ||
410 | |||
411 | /* | ||
412 | * We ran out of memory, or some other thing happened to us that made | ||
413 | * us unable to handle the page fault gracefully. | ||
414 | */ | ||
415 | out_of_memory: | ||
416 | insn = get_fault_insn(regs, insn); | ||
417 | up_read(&mm->mmap_sem); | ||
418 | printk("VM: killing process %s\n", current->comm); | ||
419 | if (!(regs->tstate & TSTATE_PRIV)) | ||
420 | do_group_exit(SIGKILL); | ||
421 | goto handle_kernel_fault; | ||
422 | |||
423 | intr_or_no_mm: | ||
424 | insn = get_fault_insn(regs, 0); | ||
425 | goto handle_kernel_fault; | ||
426 | |||
427 | do_sigbus: | ||
428 | insn = get_fault_insn(regs, insn); | ||
429 | up_read(&mm->mmap_sem); | ||
430 | |||
431 | /* | ||
432 | * Send a sigbus, regardless of whether we were in kernel | ||
433 | * or user mode. | ||
434 | */ | ||
435 | do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, insn, fault_code); | ||
436 | |||
437 | /* Kernel mode? Handle exceptions or die */ | ||
438 | if (regs->tstate & TSTATE_PRIV) | ||
439 | goto handle_kernel_fault; | ||
440 | } | ||
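The magic constants in the handler above all come from the SPARC instruction encoding: loads, stores and atomics are format-3 ops with bits 31:30 set, the alternate-space forms also set bit 23 (hence the 0xc0800000 tests), and the ASI comes either from the %tstate ASI field or from instruction bits 12:5 depending on the immediate bit. A minimal standalone sketch of that decode logic follows; the helper names are illustrative and not part of the kernel:

#include <stdint.h>
#include <stdbool.h>

/* Alternate-space load/store: bits 31:30 (format 3) plus bit 23 set. */
static bool is_alternate_space_op(uint32_t insn)
{
	return (insn & 0xc0800000) == 0xc0800000;
}

/* Store (but not prefetch): bit 21 set, op3 not a prefetch encoding,
 * mirroring the (insn & 0x01780000) != 0x01680000 filter above. */
static bool is_store_op(uint32_t insn)
{
	return (insn & 0xc0200000) == 0xc0200000 &&
	       (insn & 0x01780000) != 0x01680000;
}

/* With the immediate bit (0x2000) set, the ASI comes from the ASI
 * field of %tstate (bits 31:24); otherwise from instruction bits 12:5. */
static uint8_t decode_asi(uint32_t insn, uint64_t tstate)
{
	return (insn & 0x2000) ? (uint8_t)(tstate >> 24)
			       : (uint8_t)(insn >> 5);
}

/* (asi & 0xf2) == 0x82 matches the non-faulting ASIs, which is why such
 * accesses are routed to handle_ld_nf() rather than raising a signal. */
static bool is_nonfaulting_asi(uint8_t asi)
{
	return (asi & 0xf2) == 0x82;
}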
diff --git a/arch/sparc/mm/generic_64.c b/arch/sparc/mm/generic_64.c
new file mode 100644
index 000000000000..f362c2037013
--- /dev/null
+++ b/arch/sparc/mm/generic_64.c
@@ -0,0 +1,163 @@
1 | /* | ||
2 | * generic.c: Generic Sparc mm routines that are not dependent upon | ||
3 | * MMU type but are Sparc specific. | ||
4 | * | ||
5 | * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) | ||
6 | */ | ||
7 | |||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/mm.h> | ||
10 | #include <linux/swap.h> | ||
11 | #include <linux/pagemap.h> | ||
12 | |||
13 | #include <asm/pgalloc.h> | ||
14 | #include <asm/pgtable.h> | ||
15 | #include <asm/page.h> | ||
16 | #include <asm/tlbflush.h> | ||
17 | |||
18 | /* Remap IO memory, the same way as remap_pfn_range(), but use | ||
19 | * the obio memory space. | ||
20 | * | ||
21 | * They use a pgprot that sets PAGE_IO and does not check the | ||
22 | * mem_map table as this is independent of normal memory. | ||
23 | */ | ||
24 | static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte, | ||
25 | unsigned long address, | ||
26 | unsigned long size, | ||
27 | unsigned long offset, pgprot_t prot, | ||
28 | int space) | ||
29 | { | ||
30 | unsigned long end; | ||
31 | |||
32 | /* clear hack bit that was used as a write_combine side-effect flag */ | ||
33 | offset &= ~0x1UL; | ||
34 | address &= ~PMD_MASK; | ||
35 | end = address + size; | ||
36 | if (end > PMD_SIZE) | ||
37 | end = PMD_SIZE; | ||
38 | do { | ||
39 | pte_t entry; | ||
40 | unsigned long curend = address + PAGE_SIZE; | ||
41 | |||
42 | entry = mk_pte_io(offset, prot, space, PAGE_SIZE); | ||
43 | if (!(address & 0xffff)) { | ||
44 | if (PAGE_SIZE < (4 * 1024 * 1024) && | ||
45 | !(address & 0x3fffff) && | ||
46 | !(offset & 0x3ffffe) && | ||
47 | end >= address + 0x400000) { | ||
48 | entry = mk_pte_io(offset, prot, space, | ||
49 | 4 * 1024 * 1024); | ||
50 | curend = address + 0x400000; | ||
51 | offset += 0x400000; | ||
52 | } else if (PAGE_SIZE < (512 * 1024) && | ||
53 | !(address & 0x7ffff) && | ||
54 | !(offset & 0x7fffe) && | ||
55 | end >= address + 0x80000) { | ||
56 | entry = mk_pte_io(offset, prot, space, | ||
57 | 512 * 1024); | ||
58 | curend = address + 0x80000; | ||
59 | offset += 0x80000; | ||
60 | } else if (PAGE_SIZE < (64 * 1024) && | ||
61 | !(offset & 0xfffe) && | ||
62 | end >= address + 0x10000) { | ||
63 | entry = mk_pte_io(offset, prot, space, | ||
64 | 64 * 1024); | ||
65 | curend = address + 0x10000; | ||
66 | offset += 0x10000; | ||
67 | } else | ||
68 | offset += PAGE_SIZE; | ||
69 | } else | ||
70 | offset += PAGE_SIZE; | ||
71 | |||
72 | if (pte_write(entry)) | ||
73 | entry = pte_mkdirty(entry); | ||
74 | do { | ||
75 | BUG_ON(!pte_none(*pte)); | ||
76 | set_pte_at(mm, address, pte, entry); | ||
77 | address += PAGE_SIZE; | ||
78 | pte_val(entry) += PAGE_SIZE; | ||
79 | pte++; | ||
80 | } while (address < curend); | ||
81 | } while (address < end); | ||
82 | } | ||
83 | |||
84 | static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size, | ||
85 | unsigned long offset, pgprot_t prot, int space) | ||
86 | { | ||
87 | unsigned long end; | ||
88 | |||
89 | address &= ~PGDIR_MASK; | ||
90 | end = address + size; | ||
91 | if (end > PGDIR_SIZE) | ||
92 | end = PGDIR_SIZE; | ||
93 | offset -= address; | ||
94 | do { | ||
95 | pte_t * pte = pte_alloc_map(mm, pmd, address); | ||
96 | if (!pte) | ||
97 | return -ENOMEM; | ||
98 | io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space); | ||
99 | pte_unmap(pte); | ||
100 | address = (address + PMD_SIZE) & PMD_MASK; | ||
101 | pmd++; | ||
102 | } while (address < end); | ||
103 | return 0; | ||
104 | } | ||
105 | |||
106 | static inline int io_remap_pud_range(struct mm_struct *mm, pud_t * pud, unsigned long address, unsigned long size, | ||
107 | unsigned long offset, pgprot_t prot, int space) | ||
108 | { | ||
109 | unsigned long end; | ||
110 | |||
111 | address &= ~PUD_MASK; | ||
112 | end = address + size; | ||
113 | if (end > PUD_SIZE) | ||
114 | end = PUD_SIZE; | ||
115 | offset -= address; | ||
116 | do { | ||
117 | pmd_t *pmd = pmd_alloc(mm, pud, address); | ||
118 | if (!pud) | ||
119 | return -ENOMEM; | ||
120 | io_remap_pmd_range(mm, pmd, address, end - address, address + offset, prot, space); | ||
121 | address = (address + PUD_SIZE) & PUD_MASK; | ||
122 | pud++; | ||
123 | } while (address < end); | ||
124 | return 0; | ||
125 | } | ||
126 | |||
127 | int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from, | ||
128 | unsigned long pfn, unsigned long size, pgprot_t prot) | ||
129 | { | ||
130 | int error = 0; | ||
131 | pgd_t * dir; | ||
132 | unsigned long beg = from; | ||
133 | unsigned long end = from + size; | ||
134 | struct mm_struct *mm = vma->vm_mm; | ||
135 | int space = GET_IOSPACE(pfn); | ||
136 | unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT; | ||
137 | unsigned long phys_base; | ||
138 | |||
139 | phys_base = offset | (((unsigned long) space) << 32UL); | ||
140 | |||
141 | /* See comment in mm/memory.c remap_pfn_range */ | ||
142 | vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP; | ||
143 | vma->vm_pgoff = phys_base >> PAGE_SHIFT; | ||
144 | |||
145 | offset -= from; | ||
146 | dir = pgd_offset(mm, from); | ||
147 | flush_cache_range(vma, beg, end); | ||
148 | |||
149 | while (from < end) { | ||
150 | pud_t *pud = pud_alloc(mm, dir, from); | ||
151 | error = -ENOMEM; | ||
152 | if (!pud) | ||
153 | break; | ||
154 | error = io_remap_pud_range(mm, pud, from, end - from, offset + from, prot, space); | ||
155 | if (error) | ||
156 | break; | ||
157 | from = (from + PGDIR_SIZE) & PGDIR_MASK; | ||
158 | dir++; | ||
159 | } | ||
160 | |||
161 | flush_tlb_range(vma, beg, end); | ||
162 | return error; | ||
163 | } | ||
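io_remap_pfn_range() above rebuilds a 64-bit physical bus address from a pfn cookie whose upper bits carry an I/O space number (via GET_IOSPACE()/GET_PFN()). A hedged sketch of that arithmetic, assuming the space number sits in the top four bits of the cookie (the exact macro layout lives in asm/pgtable_64.h and is not shown in this diff):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT_64 13	/* sparc64 uses 8K base pages */

/* Assumed cookie layout: 4-bit I/O space number above a 60-bit pfn. */
static uint32_t get_iospace(uint64_t cookie) { return cookie >> 60; }
static uint64_t get_pfn(uint64_t cookie)     { return cookie & 0x0fffffffffffffffULL; }

int main(void)
{
	uint64_t cookie = (2ULL << 60) | 0x12345;	/* space 2, pfn 0x12345 */
	uint64_t offset = get_pfn(cookie) << PAGE_SHIFT_64;
	uint64_t phys_base = offset | ((uint64_t)get_iospace(cookie) << 32);

	/* phys_base >> PAGE_SHIFT is what io_remap_pfn_range() stores
	 * into vma->vm_pgoff before walking the page tables. */
	printf("space=%u, phys_base=%#llx\n", get_iospace(cookie),
	       (unsigned long long)phys_base);
	return 0;
}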
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
new file mode 100644
index 000000000000..f27d10369e0c
--- /dev/null
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -0,0 +1,357 @@
1 | /* | ||
2 | * SPARC64 Huge TLB page support. | ||
3 | * | ||
4 | * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net) | ||
5 | */ | ||
6 | |||
7 | #include <linux/init.h> | ||
8 | #include <linux/module.h> | ||
9 | #include <linux/fs.h> | ||
10 | #include <linux/mm.h> | ||
11 | #include <linux/hugetlb.h> | ||
12 | #include <linux/pagemap.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/sysctl.h> | ||
15 | |||
16 | #include <asm/mman.h> | ||
17 | #include <asm/pgalloc.h> | ||
18 | #include <asm/tlb.h> | ||
19 | #include <asm/tlbflush.h> | ||
20 | #include <asm/cacheflush.h> | ||
21 | #include <asm/mmu_context.h> | ||
22 | |||
23 | /* Slightly simplified from the non-hugepage variant because by | ||
24 | * definition we don't have to worry about any page coloring stuff | ||
25 | */ | ||
26 | #define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL)) | ||
27 | #define VA_EXCLUDE_END (0xfffff80000000000UL + (1UL << 32UL)) | ||
28 | |||
29 | static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp, | ||
30 | unsigned long addr, | ||
31 | unsigned long len, | ||
32 | unsigned long pgoff, | ||
33 | unsigned long flags) | ||
34 | { | ||
35 | struct mm_struct *mm = current->mm; | ||
36 | struct vm_area_struct * vma; | ||
37 | unsigned long task_size = TASK_SIZE; | ||
38 | unsigned long start_addr; | ||
39 | |||
40 | if (test_thread_flag(TIF_32BIT)) | ||
41 | task_size = STACK_TOP32; | ||
42 | if (unlikely(len >= VA_EXCLUDE_START)) | ||
43 | return -ENOMEM; | ||
44 | |||
45 | if (len > mm->cached_hole_size) { | ||
46 | start_addr = addr = mm->free_area_cache; | ||
47 | } else { | ||
48 | start_addr = addr = TASK_UNMAPPED_BASE; | ||
49 | mm->cached_hole_size = 0; | ||
50 | } | ||
51 | |||
52 | task_size -= len; | ||
53 | |||
54 | full_search: | ||
55 | addr = ALIGN(addr, HPAGE_SIZE); | ||
56 | |||
57 | for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { | ||
58 | /* At this point: (!vma || addr < vma->vm_end). */ | ||
59 | if (addr < VA_EXCLUDE_START && | ||
60 | (addr + len) >= VA_EXCLUDE_START) { | ||
61 | addr = VA_EXCLUDE_END; | ||
62 | vma = find_vma(mm, VA_EXCLUDE_END); | ||
63 | } | ||
64 | if (unlikely(task_size < addr)) { | ||
65 | if (start_addr != TASK_UNMAPPED_BASE) { | ||
66 | start_addr = addr = TASK_UNMAPPED_BASE; | ||
67 | mm->cached_hole_size = 0; | ||
68 | goto full_search; | ||
69 | } | ||
70 | return -ENOMEM; | ||
71 | } | ||
72 | if (likely(!vma || addr + len <= vma->vm_start)) { | ||
73 | /* | ||
74 | * Remember the place where we stopped the search: | ||
75 | */ | ||
76 | mm->free_area_cache = addr + len; | ||
77 | return addr; | ||
78 | } | ||
79 | if (addr + mm->cached_hole_size < vma->vm_start) | ||
80 | mm->cached_hole_size = vma->vm_start - addr; | ||
81 | |||
82 | addr = ALIGN(vma->vm_end, HPAGE_SIZE); | ||
83 | } | ||
84 | } | ||
85 | |||
86 | static unsigned long | ||
87 | hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | ||
88 | const unsigned long len, | ||
89 | const unsigned long pgoff, | ||
90 | const unsigned long flags) | ||
91 | { | ||
92 | struct vm_area_struct *vma; | ||
93 | struct mm_struct *mm = current->mm; | ||
94 | unsigned long addr = addr0; | ||
95 | |||
96 | /* This should only ever run for 32-bit processes. */ | ||
97 | BUG_ON(!test_thread_flag(TIF_32BIT)); | ||
98 | |||
99 | /* check if free_area_cache is useful for us */ | ||
100 | if (len <= mm->cached_hole_size) { | ||
101 | mm->cached_hole_size = 0; | ||
102 | mm->free_area_cache = mm->mmap_base; | ||
103 | } | ||
104 | |||
105 | /* either no address requested or can't fit in requested address hole */ | ||
106 | addr = mm->free_area_cache & HPAGE_MASK; | ||
107 | |||
108 | /* make sure it can fit in the remaining address space */ | ||
109 | if (likely(addr > len)) { | ||
110 | vma = find_vma(mm, addr-len); | ||
111 | if (!vma || addr <= vma->vm_start) { | ||
112 | /* remember the address as a hint for next time */ | ||
113 | return (mm->free_area_cache = addr-len); | ||
114 | } | ||
115 | } | ||
116 | |||
117 | if (unlikely(mm->mmap_base < len)) | ||
118 | goto bottomup; | ||
119 | |||
120 | addr = (mm->mmap_base-len) & HPAGE_MASK; | ||
121 | |||
122 | do { | ||
123 | /* | ||
124 | * Lookup failure means no vma is above this address, | ||
125 | * else if new region fits below vma->vm_start, | ||
126 | * return with success: | ||
127 | */ | ||
128 | vma = find_vma(mm, addr); | ||
129 | if (likely(!vma || addr+len <= vma->vm_start)) { | ||
130 | /* remember the address as a hint for next time */ | ||
131 | return (mm->free_area_cache = addr); | ||
132 | } | ||
133 | |||
134 | /* remember the largest hole we saw so far */ | ||
135 | if (addr + mm->cached_hole_size < vma->vm_start) | ||
136 | mm->cached_hole_size = vma->vm_start - addr; | ||
137 | |||
138 | /* try just below the current vma->vm_start */ | ||
139 | addr = (vma->vm_start-len) & HPAGE_MASK; | ||
140 | } while (likely(len < vma->vm_start)); | ||
141 | |||
142 | bottomup: | ||
143 | /* | ||
144 | * A failed mmap() very likely causes application failure, | ||
145 | * so fall back to the bottom-up function here. This scenario | ||
146 | * can happen with large stack limits and large mmap() | ||
147 | * allocations. | ||
148 | */ | ||
149 | mm->cached_hole_size = ~0UL; | ||
150 | mm->free_area_cache = TASK_UNMAPPED_BASE; | ||
151 | addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags); | ||
152 | /* | ||
153 | * Restore the topdown base: | ||
154 | */ | ||
155 | mm->free_area_cache = mm->mmap_base; | ||
156 | mm->cached_hole_size = ~0UL; | ||
157 | |||
158 | return addr; | ||
159 | } | ||
160 | |||
161 | unsigned long | ||
162 | hugetlb_get_unmapped_area(struct file *file, unsigned long addr, | ||
163 | unsigned long len, unsigned long pgoff, unsigned long flags) | ||
164 | { | ||
165 | struct mm_struct *mm = current->mm; | ||
166 | struct vm_area_struct *vma; | ||
167 | unsigned long task_size = TASK_SIZE; | ||
168 | |||
169 | if (test_thread_flag(TIF_32BIT)) | ||
170 | task_size = STACK_TOP32; | ||
171 | |||
172 | if (len & ~HPAGE_MASK) | ||
173 | return -EINVAL; | ||
174 | if (len > task_size) | ||
175 | return -ENOMEM; | ||
176 | |||
177 | if (flags & MAP_FIXED) { | ||
178 | if (prepare_hugepage_range(file, addr, len)) | ||
179 | return -EINVAL; | ||
180 | return addr; | ||
181 | } | ||
182 | |||
183 | if (addr) { | ||
184 | addr = ALIGN(addr, HPAGE_SIZE); | ||
185 | vma = find_vma(mm, addr); | ||
186 | if (task_size - len >= addr && | ||
187 | (!vma || addr + len <= vma->vm_start)) | ||
188 | return addr; | ||
189 | } | ||
190 | if (mm->get_unmapped_area == arch_get_unmapped_area) | ||
191 | return hugetlb_get_unmapped_area_bottomup(file, addr, len, | ||
192 | pgoff, flags); | ||
193 | else | ||
194 | return hugetlb_get_unmapped_area_topdown(file, addr, len, | ||
195 | pgoff, flags); | ||
196 | } | ||
197 | |||
198 | pte_t *huge_pte_alloc(struct mm_struct *mm, | ||
199 | unsigned long addr, unsigned long sz) | ||
200 | { | ||
201 | pgd_t *pgd; | ||
202 | pud_t *pud; | ||
203 | pmd_t *pmd; | ||
204 | pte_t *pte = NULL; | ||
205 | |||
206 | /* We must align the address, because our caller will run | ||
207 | * set_huge_pte_at() on whatever we return, which writes out | ||
208 | * all of the sub-ptes for the hugepage range. So we have | ||
209 | * to give it the first such sub-pte. | ||
210 | */ | ||
211 | addr &= HPAGE_MASK; | ||
212 | |||
213 | pgd = pgd_offset(mm, addr); | ||
214 | pud = pud_alloc(mm, pgd, addr); | ||
215 | if (pud) { | ||
216 | pmd = pmd_alloc(mm, pud, addr); | ||
217 | if (pmd) | ||
218 | pte = pte_alloc_map(mm, pmd, addr); | ||
219 | } | ||
220 | return pte; | ||
221 | } | ||
222 | |||
223 | pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) | ||
224 | { | ||
225 | pgd_t *pgd; | ||
226 | pud_t *pud; | ||
227 | pmd_t *pmd; | ||
228 | pte_t *pte = NULL; | ||
229 | |||
230 | addr &= HPAGE_MASK; | ||
231 | |||
232 | pgd = pgd_offset(mm, addr); | ||
233 | if (!pgd_none(*pgd)) { | ||
234 | pud = pud_offset(pgd, addr); | ||
235 | if (!pud_none(*pud)) { | ||
236 | pmd = pmd_offset(pud, addr); | ||
237 | if (!pmd_none(*pmd)) | ||
238 | pte = pte_offset_map(pmd, addr); | ||
239 | } | ||
240 | } | ||
241 | return pte; | ||
242 | } | ||
243 | |||
244 | int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) | ||
245 | { | ||
246 | return 0; | ||
247 | } | ||
248 | |||
249 | void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, | ||
250 | pte_t *ptep, pte_t entry) | ||
251 | { | ||
252 | int i; | ||
253 | |||
254 | if (!pte_present(*ptep) && pte_present(entry)) | ||
255 | mm->context.huge_pte_count++; | ||
256 | |||
257 | addr &= HPAGE_MASK; | ||
258 | for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) { | ||
259 | set_pte_at(mm, addr, ptep, entry); | ||
260 | ptep++; | ||
261 | addr += PAGE_SIZE; | ||
262 | pte_val(entry) += PAGE_SIZE; | ||
263 | } | ||
264 | } | ||
265 | |||
266 | pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, | ||
267 | pte_t *ptep) | ||
268 | { | ||
269 | pte_t entry; | ||
270 | int i; | ||
271 | |||
272 | entry = *ptep; | ||
273 | if (pte_present(entry)) | ||
274 | mm->context.huge_pte_count--; | ||
275 | |||
276 | addr &= HPAGE_MASK; | ||
277 | |||
278 | for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) { | ||
279 | pte_clear(mm, addr, ptep); | ||
280 | addr += PAGE_SIZE; | ||
281 | ptep++; | ||
282 | } | ||
283 | |||
284 | return entry; | ||
285 | } | ||
286 | |||
287 | struct page *follow_huge_addr(struct mm_struct *mm, | ||
288 | unsigned long address, int write) | ||
289 | { | ||
290 | return ERR_PTR(-EINVAL); | ||
291 | } | ||
292 | |||
293 | int pmd_huge(pmd_t pmd) | ||
294 | { | ||
295 | return 0; | ||
296 | } | ||
297 | |||
298 | int pud_huge(pud_t pud) | ||
299 | { | ||
300 | return 0; | ||
301 | } | ||
302 | |||
303 | struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, | ||
304 | pmd_t *pmd, int write) | ||
305 | { | ||
306 | return NULL; | ||
307 | } | ||
308 | |||
309 | static void context_reload(void *__data) | ||
310 | { | ||
311 | struct mm_struct *mm = __data; | ||
312 | |||
313 | if (mm == current->mm) | ||
314 | load_secondary_context(mm); | ||
315 | } | ||
316 | |||
317 | void hugetlb_prefault_arch_hook(struct mm_struct *mm) | ||
318 | { | ||
319 | struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE]; | ||
320 | |||
321 | if (likely(tp->tsb != NULL)) | ||
322 | return; | ||
323 | |||
324 | tsb_grow(mm, MM_TSB_HUGE, 0); | ||
325 | tsb_context_switch(mm); | ||
326 | smp_tsb_sync(mm); | ||
327 | |||
328 | /* On UltraSPARC-III+ and later, configure the second half of | ||
329 | * the Data-TLB for huge pages. | ||
330 | */ | ||
331 | if (tlb_type == cheetah_plus) { | ||
332 | unsigned long ctx; | ||
333 | |||
334 | spin_lock(&ctx_alloc_lock); | ||
335 | ctx = mm->context.sparc64_ctx_val; | ||
336 | ctx &= ~CTX_PGSZ_MASK; | ||
337 | ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT; | ||
338 | ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT; | ||
339 | |||
340 | if (ctx != mm->context.sparc64_ctx_val) { | ||
341 | /* When changing the page size fields, we | ||
342 | * must perform a context flush so that no | ||
343 | * stale entries match. This flush must | ||
344 | * occur with the original context register | ||
345 | * settings. | ||
346 | */ | ||
347 | do_flush_tlb_mm(mm); | ||
348 | |||
349 | /* Reload the context register of all processors | ||
350 | * also executing in this address space. | ||
351 | */ | ||
352 | mm->context.sparc64_ctx_val = ctx; | ||
353 | on_each_cpu(context_reload, mm, 0); | ||
354 | } | ||
355 | spin_unlock(&ctx_alloc_lock); | ||
356 | } | ||
357 | } | ||
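Because sparc64 of this era backs a huge page with a run of base PTEs rather than a single entry, set_huge_pte_at() and huge_ptep_get_and_clear() above loop over (1 << HUGETLB_PAGE_ORDER) sub-PTEs, advancing both the virtual address and the PTE's physical address by PAGE_SIZE each step. The arithmetic, as a small sketch assuming 8K base pages and 4MB huge pages:

#include <stdio.h>

int main(void)
{
	const unsigned int page_shift  = 13;	/* 8K base page (assumed) */
	const unsigned int hpage_shift = 22;	/* 4MB huge page (assumed) */

	unsigned int order     = hpage_shift - page_shift;	/* HUGETLB_PAGE_ORDER = 9 */
	unsigned long sub_ptes = 1UL << order;			/* 512 loop iterations */

	/* Each iteration in the kernel code writes or clears one base-page
	 * PTE; the PTE value itself is bumped by PAGE_SIZE so the physical
	 * mapping stays contiguous across the huge page. */
	printf("order=%u, sub-PTEs per huge page=%lu\n", order, sub_ptes);
	return 0;
}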
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
new file mode 100644
index 000000000000..6ea73da29312
--- /dev/null
+++ b/arch/sparc/mm/init_64.c
@@ -0,0 +1,2360 @@
1 | /* | ||
2 | * arch/sparc64/mm/init.c | ||
3 | * | ||
4 | * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu) | ||
5 | * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | ||
6 | */ | ||
7 | |||
8 | #include <linux/module.h> | ||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/sched.h> | ||
11 | #include <linux/string.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/bootmem.h> | ||
14 | #include <linux/mm.h> | ||
15 | #include <linux/hugetlb.h> | ||
16 | #include <linux/slab.h> | ||
17 | #include <linux/initrd.h> | ||
18 | #include <linux/swap.h> | ||
19 | #include <linux/pagemap.h> | ||
20 | #include <linux/poison.h> | ||
21 | #include <linux/fs.h> | ||
22 | #include <linux/seq_file.h> | ||
23 | #include <linux/kprobes.h> | ||
24 | #include <linux/cache.h> | ||
25 | #include <linux/sort.h> | ||
26 | #include <linux/percpu.h> | ||
27 | #include <linux/lmb.h> | ||
28 | #include <linux/mmzone.h> | ||
29 | |||
30 | #include <asm/head.h> | ||
31 | #include <asm/system.h> | ||
32 | #include <asm/page.h> | ||
33 | #include <asm/pgalloc.h> | ||
34 | #include <asm/pgtable.h> | ||
35 | #include <asm/oplib.h> | ||
36 | #include <asm/iommu.h> | ||
37 | #include <asm/io.h> | ||
38 | #include <asm/uaccess.h> | ||
39 | #include <asm/mmu_context.h> | ||
40 | #include <asm/tlbflush.h> | ||
41 | #include <asm/dma.h> | ||
42 | #include <asm/starfire.h> | ||
43 | #include <asm/tlb.h> | ||
44 | #include <asm/spitfire.h> | ||
45 | #include <asm/sections.h> | ||
46 | #include <asm/tsb.h> | ||
47 | #include <asm/hypervisor.h> | ||
48 | #include <asm/prom.h> | ||
49 | #include <asm/mdesc.h> | ||
50 | #include <asm/cpudata.h> | ||
51 | #include <asm/irq.h> | ||
52 | |||
53 | #include "init_64.h" | ||
54 | |||
55 | unsigned long kern_linear_pte_xor[2] __read_mostly; | ||
56 | |||
57 | /* A bitmap, one bit for every 256MB of physical memory. If the bit | ||
58 | * is clear, we should use a 4MB page (via kern_linear_pte_xor[0]) else | ||
59 | * if set we should use a 256MB page (via kern_linear_pte_xor[1]). | ||
60 | */ | ||
61 | unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)]; | ||
62 | |||
63 | #ifndef CONFIG_DEBUG_PAGEALLOC | ||
64 | /* A special kernel TSB for 4MB and 256MB linear mappings. | ||
65 | * Space is allocated for this right after the trap table | ||
66 | * in arch/sparc64/kernel/head.S | ||
67 | */ | ||
68 | extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES]; | ||
69 | #endif | ||
70 | |||
71 | #define MAX_BANKS 32 | ||
72 | |||
73 | static struct linux_prom64_registers pavail[MAX_BANKS] __initdata; | ||
74 | static int pavail_ents __initdata; | ||
75 | |||
76 | static int cmp_p64(const void *a, const void *b) | ||
77 | { | ||
78 | const struct linux_prom64_registers *x = a, *y = b; | ||
79 | |||
80 | if (x->phys_addr > y->phys_addr) | ||
81 | return 1; | ||
82 | if (x->phys_addr < y->phys_addr) | ||
83 | return -1; | ||
84 | return 0; | ||
85 | } | ||
86 | |||
87 | static void __init read_obp_memory(const char *property, | ||
88 | struct linux_prom64_registers *regs, | ||
89 | int *num_ents) | ||
90 | { | ||
91 | int node = prom_finddevice("/memory"); | ||
92 | int prop_size = prom_getproplen(node, property); | ||
93 | int ents, ret, i; | ||
94 | |||
95 | ents = prop_size / sizeof(struct linux_prom64_registers); | ||
96 | if (ents > MAX_BANKS) { | ||
97 | prom_printf("The machine has more %s property entries than " | ||
98 | "this kernel can support (%d).\n", | ||
99 | property, MAX_BANKS); | ||
100 | prom_halt(); | ||
101 | } | ||
102 | |||
103 | ret = prom_getproperty(node, property, (char *) regs, prop_size); | ||
104 | if (ret == -1) { | ||
105 | prom_printf("Couldn't get %s property from /memory.\n"); | ||
106 | prom_halt(); | ||
107 | } | ||
108 | |||
109 | /* Sanitize what we got from the firmware, by page aligning | ||
110 | * everything. | ||
111 | */ | ||
112 | for (i = 0; i < ents; i++) { | ||
113 | unsigned long base, size; | ||
114 | |||
115 | base = regs[i].phys_addr; | ||
116 | size = regs[i].reg_size; | ||
117 | |||
118 | size &= PAGE_MASK; | ||
119 | if (base & ~PAGE_MASK) { | ||
120 | unsigned long new_base = PAGE_ALIGN(base); | ||
121 | |||
122 | size -= new_base - base; | ||
123 | if ((long) size < 0L) | ||
124 | size = 0UL; | ||
125 | base = new_base; | ||
126 | } | ||
127 | if (size == 0UL) { | ||
128 | /* If it is empty, simply get rid of it. | ||
129 | * This simplifies the logic of the other | ||
130 | * functions that process these arrays. | ||
131 | */ | ||
132 | memmove(®s[i], ®s[i + 1], | ||
133 | (ents - i - 1) * sizeof(regs[0])); | ||
134 | i--; | ||
135 | ents--; | ||
136 | continue; | ||
137 | } | ||
138 | regs[i].phys_addr = base; | ||
139 | regs[i].reg_size = size; | ||
140 | } | ||
141 | |||
142 | *num_ents = ents; | ||
143 | |||
144 | sort(regs, ents, sizeof(struct linux_prom64_registers), | ||
145 | cmp_p64, NULL); | ||
146 | } | ||
147 | |||
148 | unsigned long *sparc64_valid_addr_bitmap __read_mostly; | ||
149 | |||
150 | /* Kernel physical address base and size in bytes. */ | ||
151 | unsigned long kern_base __read_mostly; | ||
152 | unsigned long kern_size __read_mostly; | ||
153 | |||
154 | /* Initial ramdisk setup */ | ||
155 | extern unsigned long sparc_ramdisk_image64; | ||
156 | extern unsigned int sparc_ramdisk_image; | ||
157 | extern unsigned int sparc_ramdisk_size; | ||
158 | |||
159 | struct page *mem_map_zero __read_mostly; | ||
160 | EXPORT_SYMBOL(mem_map_zero); | ||
161 | |||
162 | unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly; | ||
163 | |||
164 | unsigned long sparc64_kern_pri_context __read_mostly; | ||
165 | unsigned long sparc64_kern_pri_nuc_bits __read_mostly; | ||
166 | unsigned long sparc64_kern_sec_context __read_mostly; | ||
167 | |||
168 | int num_kernel_image_mappings; | ||
169 | |||
170 | #ifdef CONFIG_DEBUG_DCFLUSH | ||
171 | atomic_t dcpage_flushes = ATOMIC_INIT(0); | ||
172 | #ifdef CONFIG_SMP | ||
173 | atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0); | ||
174 | #endif | ||
175 | #endif | ||
176 | |||
177 | inline void flush_dcache_page_impl(struct page *page) | ||
178 | { | ||
179 | BUG_ON(tlb_type == hypervisor); | ||
180 | #ifdef CONFIG_DEBUG_DCFLUSH | ||
181 | atomic_inc(&dcpage_flushes); | ||
182 | #endif | ||
183 | |||
184 | #ifdef DCACHE_ALIASING_POSSIBLE | ||
185 | __flush_dcache_page(page_address(page), | ||
186 | ((tlb_type == spitfire) && | ||
187 | page_mapping(page) != NULL)); | ||
188 | #else | ||
189 | if (page_mapping(page) != NULL && | ||
190 | tlb_type == spitfire) | ||
191 | __flush_icache_page(__pa(page_address(page))); | ||
192 | #endif | ||
193 | } | ||
194 | |||
195 | #define PG_dcache_dirty PG_arch_1 | ||
196 | #define PG_dcache_cpu_shift 32UL | ||
197 | #define PG_dcache_cpu_mask \ | ||
198 | ((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL) | ||
199 | |||
200 | #define dcache_dirty_cpu(page) \ | ||
201 | (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask) | ||
202 | |||
203 | static inline void set_dcache_dirty(struct page *page, int this_cpu) | ||
204 | { | ||
205 | unsigned long mask = this_cpu; | ||
206 | unsigned long non_cpu_bits; | ||
207 | |||
208 | non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift); | ||
209 | mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty); | ||
210 | |||
211 | __asm__ __volatile__("1:\n\t" | ||
212 | "ldx [%2], %%g7\n\t" | ||
213 | "and %%g7, %1, %%g1\n\t" | ||
214 | "or %%g1, %0, %%g1\n\t" | ||
215 | "casx [%2], %%g7, %%g1\n\t" | ||
216 | "cmp %%g7, %%g1\n\t" | ||
217 | "bne,pn %%xcc, 1b\n\t" | ||
218 | " nop" | ||
219 | : /* no outputs */ | ||
220 | : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags) | ||
221 | : "g1", "g7"); | ||
222 | } | ||
223 | |||
224 | static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu) | ||
225 | { | ||
226 | unsigned long mask = (1UL << PG_dcache_dirty); | ||
227 | |||
228 | __asm__ __volatile__("! test_and_clear_dcache_dirty\n" | ||
229 | "1:\n\t" | ||
230 | "ldx [%2], %%g7\n\t" | ||
231 | "srlx %%g7, %4, %%g1\n\t" | ||
232 | "and %%g1, %3, %%g1\n\t" | ||
233 | "cmp %%g1, %0\n\t" | ||
234 | "bne,pn %%icc, 2f\n\t" | ||
235 | " andn %%g7, %1, %%g1\n\t" | ||
236 | "casx [%2], %%g7, %%g1\n\t" | ||
237 | "cmp %%g7, %%g1\n\t" | ||
238 | "bne,pn %%xcc, 1b\n\t" | ||
239 | " nop\n" | ||
240 | "2:" | ||
241 | : /* no outputs */ | ||
242 | : "r" (cpu), "r" (mask), "r" (&page->flags), | ||
243 | "i" (PG_dcache_cpu_mask), | ||
244 | "i" (PG_dcache_cpu_shift) | ||
245 | : "g1", "g7"); | ||
246 | } | ||
247 | |||
248 | static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte) | ||
249 | { | ||
250 | unsigned long tsb_addr = (unsigned long) ent; | ||
251 | |||
252 | if (tlb_type == cheetah_plus || tlb_type == hypervisor) | ||
253 | tsb_addr = __pa(tsb_addr); | ||
254 | |||
255 | __tsb_insert(tsb_addr, tag, pte); | ||
256 | } | ||
257 | |||
258 | unsigned long _PAGE_ALL_SZ_BITS __read_mostly; | ||
259 | unsigned long _PAGE_SZBITS __read_mostly; | ||
260 | |||
261 | void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) | ||
262 | { | ||
263 | struct mm_struct *mm; | ||
264 | struct tsb *tsb; | ||
265 | unsigned long tag, flags; | ||
266 | unsigned long tsb_index, tsb_hash_shift; | ||
267 | |||
268 | if (tlb_type != hypervisor) { | ||
269 | unsigned long pfn = pte_pfn(pte); | ||
270 | unsigned long pg_flags; | ||
271 | struct page *page; | ||
272 | |||
273 | if (pfn_valid(pfn) && | ||
274 | (page = pfn_to_page(pfn), page_mapping(page)) && | ||
275 | ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) { | ||
276 | int cpu = ((pg_flags >> PG_dcache_cpu_shift) & | ||
277 | PG_dcache_cpu_mask); | ||
278 | int this_cpu = get_cpu(); | ||
279 | |||
280 | /* This is just to optimize away some function calls | ||
281 | * in the SMP case. | ||
282 | */ | ||
283 | if (cpu == this_cpu) | ||
284 | flush_dcache_page_impl(page); | ||
285 | else | ||
286 | smp_flush_dcache_page_impl(page, cpu); | ||
287 | |||
288 | clear_dcache_dirty_cpu(page, cpu); | ||
289 | |||
290 | put_cpu(); | ||
291 | } | ||
292 | } | ||
293 | |||
294 | mm = vma->vm_mm; | ||
295 | |||
296 | tsb_index = MM_TSB_BASE; | ||
297 | tsb_hash_shift = PAGE_SHIFT; | ||
298 | |||
299 | spin_lock_irqsave(&mm->context.lock, flags); | ||
300 | |||
301 | #ifdef CONFIG_HUGETLB_PAGE | ||
302 | if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) { | ||
303 | if ((tlb_type == hypervisor && | ||
304 | (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) || | ||
305 | (tlb_type != hypervisor && | ||
306 | (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) { | ||
307 | tsb_index = MM_TSB_HUGE; | ||
308 | tsb_hash_shift = HPAGE_SHIFT; | ||
309 | } | ||
310 | } | ||
311 | #endif | ||
312 | |||
313 | tsb = mm->context.tsb_block[tsb_index].tsb; | ||
314 | tsb += ((address >> tsb_hash_shift) & | ||
315 | (mm->context.tsb_block[tsb_index].tsb_nentries - 1UL)); | ||
316 | tag = (address >> 22UL); | ||
317 | tsb_insert(tsb, tag, pte_val(pte)); | ||
318 | |||
319 | spin_unlock_irqrestore(&mm->context.lock, flags); | ||
320 | } | ||
321 | |||
322 | void flush_dcache_page(struct page *page) | ||
323 | { | ||
324 | struct address_space *mapping; | ||
325 | int this_cpu; | ||
326 | |||
327 | if (tlb_type == hypervisor) | ||
328 | return; | ||
329 | |||
330 | /* Do not bother with the expensive D-cache flush if it | ||
331 | * is merely the zero page. The 'bigcore' testcase in GDB | ||
332 | * causes this case to run millions of times. | ||
333 | */ | ||
334 | if (page == ZERO_PAGE(0)) | ||
335 | return; | ||
336 | |||
337 | this_cpu = get_cpu(); | ||
338 | |||
339 | mapping = page_mapping(page); | ||
340 | if (mapping && !mapping_mapped(mapping)) { | ||
341 | int dirty = test_bit(PG_dcache_dirty, &page->flags); | ||
342 | if (dirty) { | ||
343 | int dirty_cpu = dcache_dirty_cpu(page); | ||
344 | |||
345 | if (dirty_cpu == this_cpu) | ||
346 | goto out; | ||
347 | smp_flush_dcache_page_impl(page, dirty_cpu); | ||
348 | } | ||
349 | set_dcache_dirty(page, this_cpu); | ||
350 | } else { | ||
351 | /* We could delay the flush for the !page_mapping | ||
352 | * case too. But that case is for exec env/arg | ||
353 | * pages and those are %99 certainly going to get | ||
354 | * faulted into the tlb (and thus flushed) anyways. | ||
355 | */ | ||
356 | flush_dcache_page_impl(page); | ||
357 | } | ||
358 | |||
359 | out: | ||
360 | put_cpu(); | ||
361 | } | ||
362 | |||
363 | void __kprobes flush_icache_range(unsigned long start, unsigned long end) | ||
364 | { | ||
365 | /* Cheetah and Hypervisor platform cpus have coherent I-cache. */ | ||
366 | if (tlb_type == spitfire) { | ||
367 | unsigned long kaddr; | ||
368 | |||
369 | /* This code only runs on Spitfire cpus so this is | ||
370 | * why we can assume _PAGE_PADDR_4U. | ||
371 | */ | ||
372 | for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) { | ||
373 | unsigned long paddr, mask = _PAGE_PADDR_4U; | ||
374 | |||
375 | if (kaddr >= PAGE_OFFSET) | ||
376 | paddr = kaddr & mask; | ||
377 | else { | ||
378 | pgd_t *pgdp = pgd_offset_k(kaddr); | ||
379 | pud_t *pudp = pud_offset(pgdp, kaddr); | ||
380 | pmd_t *pmdp = pmd_offset(pudp, kaddr); | ||
381 | pte_t *ptep = pte_offset_kernel(pmdp, kaddr); | ||
382 | |||
383 | paddr = pte_val(*ptep) & mask; | ||
384 | } | ||
385 | __flush_icache_page(paddr); | ||
386 | } | ||
387 | } | ||
388 | } | ||
389 | |||
390 | void mmu_info(struct seq_file *m) | ||
391 | { | ||
392 | if (tlb_type == cheetah) | ||
393 | seq_printf(m, "MMU Type\t: Cheetah\n"); | ||
394 | else if (tlb_type == cheetah_plus) | ||
395 | seq_printf(m, "MMU Type\t: Cheetah+\n"); | ||
396 | else if (tlb_type == spitfire) | ||
397 | seq_printf(m, "MMU Type\t: Spitfire\n"); | ||
398 | else if (tlb_type == hypervisor) | ||
399 | seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n"); | ||
400 | else | ||
401 | seq_printf(m, "MMU Type\t: ???\n"); | ||
402 | |||
403 | #ifdef CONFIG_DEBUG_DCFLUSH | ||
404 | seq_printf(m, "DCPageFlushes\t: %d\n", | ||
405 | atomic_read(&dcpage_flushes)); | ||
406 | #ifdef CONFIG_SMP | ||
407 | seq_printf(m, "DCPageFlushesXC\t: %d\n", | ||
408 | atomic_read(&dcpage_flushes_xcall)); | ||
409 | #endif /* CONFIG_SMP */ | ||
410 | #endif /* CONFIG_DEBUG_DCFLUSH */ | ||
411 | } | ||
412 | |||
413 | struct linux_prom_translation prom_trans[512] __read_mostly; | ||
414 | unsigned int prom_trans_ents __read_mostly; | ||
415 | |||
416 | unsigned long kern_locked_tte_data; | ||
417 | |||
418 | /* The obp translations are saved based on 8k pagesize, since obp can | ||
419 | * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS -> | ||
420 | * HI_OBP_ADDRESS range are handled in ktlb.S. | ||
421 | */ | ||
422 | static inline int in_obp_range(unsigned long vaddr) | ||
423 | { | ||
424 | return (vaddr >= LOW_OBP_ADDRESS && | ||
425 | vaddr < HI_OBP_ADDRESS); | ||
426 | } | ||
427 | |||
428 | static int cmp_ptrans(const void *a, const void *b) | ||
429 | { | ||
430 | const struct linux_prom_translation *x = a, *y = b; | ||
431 | |||
432 | if (x->virt > y->virt) | ||
433 | return 1; | ||
434 | if (x->virt < y->virt) | ||
435 | return -1; | ||
436 | return 0; | ||
437 | } | ||
438 | |||
439 | /* Read OBP translations property into 'prom_trans[]'. */ | ||
440 | static void __init read_obp_translations(void) | ||
441 | { | ||
442 | int n, node, ents, first, last, i; | ||
443 | |||
444 | node = prom_finddevice("/virtual-memory"); | ||
445 | n = prom_getproplen(node, "translations"); | ||
446 | if (unlikely(n == 0 || n == -1)) { | ||
447 | prom_printf("prom_mappings: Couldn't get size.\n"); | ||
448 | prom_halt(); | ||
449 | } | ||
450 | if (unlikely(n > sizeof(prom_trans))) { | ||
451 | prom_printf("prom_mappings: Size %Zd is too big.\n", n); | ||
452 | prom_halt(); | ||
453 | } | ||
454 | |||
455 | if ((n = prom_getproperty(node, "translations", | ||
456 | (char *)&prom_trans[0], | ||
457 | sizeof(prom_trans))) == -1) { | ||
458 | prom_printf("prom_mappings: Couldn't get property.\n"); | ||
459 | prom_halt(); | ||
460 | } | ||
461 | |||
462 | n = n / sizeof(struct linux_prom_translation); | ||
463 | |||
464 | ents = n; | ||
465 | |||
466 | sort(prom_trans, ents, sizeof(struct linux_prom_translation), | ||
467 | cmp_ptrans, NULL); | ||
468 | |||
469 | /* Now kick out all the non-OBP entries. */ | ||
470 | for (i = 0; i < ents; i++) { | ||
471 | if (in_obp_range(prom_trans[i].virt)) | ||
472 | break; | ||
473 | } | ||
474 | first = i; | ||
475 | for (; i < ents; i++) { | ||
476 | if (!in_obp_range(prom_trans[i].virt)) | ||
477 | break; | ||
478 | } | ||
479 | last = i; | ||
480 | |||
481 | for (i = 0; i < (last - first); i++) { | ||
482 | struct linux_prom_translation *src = &prom_trans[i + first]; | ||
483 | struct linux_prom_translation *dest = &prom_trans[i]; | ||
484 | |||
485 | *dest = *src; | ||
486 | } | ||
487 | for (; i < ents; i++) { | ||
488 | struct linux_prom_translation *dest = &prom_trans[i]; | ||
489 | dest->virt = dest->size = dest->data = 0x0UL; | ||
490 | } | ||
491 | |||
492 | prom_trans_ents = last - first; | ||
493 | |||
494 | if (tlb_type == spitfire) { | ||
495 | /* Clear diag TTE bits. */ | ||
496 | for (i = 0; i < prom_trans_ents; i++) | ||
497 | prom_trans[i].data &= ~0x0003fe0000000000UL; | ||
498 | } | ||
499 | } | ||
500 | |||
501 | static void __init hypervisor_tlb_lock(unsigned long vaddr, | ||
502 | unsigned long pte, | ||
503 | unsigned long mmu) | ||
504 | { | ||
505 | unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu); | ||
506 | |||
507 | if (ret != 0) { | ||
508 | prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: " | ||
509 | "errors with %lx\n", vaddr, 0, pte, mmu, ret); | ||
510 | prom_halt(); | ||
511 | } | ||
512 | } | ||
513 | |||
514 | static unsigned long kern_large_tte(unsigned long paddr); | ||
515 | |||
516 | static void __init remap_kernel(void) | ||
517 | { | ||
518 | unsigned long phys_page, tte_vaddr, tte_data; | ||
519 | int i, tlb_ent = sparc64_highest_locked_tlbent(); | ||
520 | |||
521 | tte_vaddr = (unsigned long) KERNBASE; | ||
522 | phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL; | ||
523 | tte_data = kern_large_tte(phys_page); | ||
524 | |||
525 | kern_locked_tte_data = tte_data; | ||
526 | |||
527 | /* Now lock us into the TLBs via Hypervisor or OBP. */ | ||
528 | if (tlb_type == hypervisor) { | ||
529 | for (i = 0; i < num_kernel_image_mappings; i++) { | ||
530 | hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU); | ||
531 | hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU); | ||
532 | tte_vaddr += 0x400000; | ||
533 | tte_data += 0x400000; | ||
534 | } | ||
535 | } else { | ||
536 | for (i = 0; i < num_kernel_image_mappings; i++) { | ||
537 | prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr); | ||
538 | prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr); | ||
539 | tte_vaddr += 0x400000; | ||
540 | tte_data += 0x400000; | ||
541 | } | ||
542 | sparc64_highest_unlocked_tlb_ent = tlb_ent - i; | ||
543 | } | ||
544 | if (tlb_type == cheetah_plus) { | ||
545 | sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 | | ||
546 | CTX_CHEETAH_PLUS_NUC); | ||
547 | sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC; | ||
548 | sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0; | ||
549 | } | ||
550 | } | ||
551 | |||
552 | |||
553 | static void __init inherit_prom_mappings(void) | ||
554 | { | ||
555 | /* Now fixup OBP's idea about where we really are mapped. */ | ||
556 | printk("Remapping the kernel... "); | ||
557 | remap_kernel(); | ||
558 | printk("done.\n"); | ||
559 | } | ||
560 | |||
561 | void prom_world(int enter) | ||
562 | { | ||
563 | if (!enter) | ||
564 | set_fs((mm_segment_t) { get_thread_current_ds() }); | ||
565 | |||
566 | __asm__ __volatile__("flushw"); | ||
567 | } | ||
568 | |||
569 | void __flush_dcache_range(unsigned long start, unsigned long end) | ||
570 | { | ||
571 | unsigned long va; | ||
572 | |||
573 | if (tlb_type == spitfire) { | ||
574 | int n = 0; | ||
575 | |||
576 | for (va = start; va < end; va += 32) { | ||
577 | spitfire_put_dcache_tag(va & 0x3fe0, 0x0); | ||
578 | if (++n >= 512) | ||
579 | break; | ||
580 | } | ||
581 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { | ||
582 | start = __pa(start); | ||
583 | end = __pa(end); | ||
584 | for (va = start; va < end; va += 32) | ||
585 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | ||
586 | "membar #Sync" | ||
587 | : /* no outputs */ | ||
588 | : "r" (va), | ||
589 | "i" (ASI_DCACHE_INVALIDATE)); | ||
590 | } | ||
591 | } | ||
592 | |||
593 | /* get_new_mmu_context() uses "cache + 1". */ | ||
594 | DEFINE_SPINLOCK(ctx_alloc_lock); | ||
595 | unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1; | ||
596 | #define MAX_CTX_NR (1UL << CTX_NR_BITS) | ||
597 | #define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR) | ||
598 | DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR); | ||
599 | |||
600 | /* Caller does TLB context flushing on local CPU if necessary. | ||
601 | * The caller also ensures that CTX_VALID(mm->context) is false. | ||
602 | * | ||
603 | * We must be careful about boundary cases so that we never | ||
604 | * let the user have CTX 0 (nucleus) and never use a CTX | ||
605 | * version of zero (otherwise NO_CONTEXT would not be caught | ||
606 | * by the version mis-match tests in mmu_context.h). | ||
607 | * | ||
608 | * Always invoked with interrupts disabled. | ||
609 | */ | ||
610 | void get_new_mmu_context(struct mm_struct *mm) | ||
611 | { | ||
612 | unsigned long ctx, new_ctx; | ||
613 | unsigned long orig_pgsz_bits; | ||
614 | unsigned long flags; | ||
615 | int new_version; | ||
616 | |||
617 | spin_lock_irqsave(&ctx_alloc_lock, flags); | ||
618 | orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK); | ||
619 | ctx = (tlb_context_cache + 1) & CTX_NR_MASK; | ||
620 | new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx); | ||
621 | new_version = 0; | ||
622 | if (new_ctx >= (1 << CTX_NR_BITS)) { | ||
623 | new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1); | ||
624 | if (new_ctx >= ctx) { | ||
625 | int i; | ||
626 | new_ctx = (tlb_context_cache & CTX_VERSION_MASK) + | ||
627 | CTX_FIRST_VERSION; | ||
628 | if (new_ctx == 1) | ||
629 | new_ctx = CTX_FIRST_VERSION; | ||
630 | |||
631 | /* Don't call memset, for 16 entries that's just | ||
632 | * plain silly... | ||
633 | */ | ||
634 | mmu_context_bmap[0] = 3; | ||
635 | mmu_context_bmap[1] = 0; | ||
636 | mmu_context_bmap[2] = 0; | ||
637 | mmu_context_bmap[3] = 0; | ||
638 | for (i = 4; i < CTX_BMAP_SLOTS; i += 4) { | ||
639 | mmu_context_bmap[i + 0] = 0; | ||
640 | mmu_context_bmap[i + 1] = 0; | ||
641 | mmu_context_bmap[i + 2] = 0; | ||
642 | mmu_context_bmap[i + 3] = 0; | ||
643 | } | ||
644 | new_version = 1; | ||
645 | goto out; | ||
646 | } | ||
647 | } | ||
648 | mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63)); | ||
649 | new_ctx |= (tlb_context_cache & CTX_VERSION_MASK); | ||
650 | out: | ||
651 | tlb_context_cache = new_ctx; | ||
652 | mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits; | ||
653 | spin_unlock_irqrestore(&ctx_alloc_lock, flags); | ||
654 | |||
655 | if (unlikely(new_version)) | ||
656 | smp_new_mmu_context_version(); | ||
657 | } | ||
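
The context word packs an allocation-epoch version in the bits above CTX_NR_BITS and a context number below; on number-space wrap the version is bumped and the bitmap cleared, so every mm still holding an old version fails the version test and reallocates. A toy model of the packing, with a made-up field width (the real widths live in mmu_context.h):

	#include <stdio.h>

	#define NR_BITS     13                     /* made-up width for the demo */
	#define NR_MASK     ((1UL << NR_BITS) - 1)
	#define VERSION_INC (1UL << NR_BITS)       /* version sits above the number */

	int main(void)
	{
		unsigned long cache = VERSION_INC;  /* version 1, number 0 */
		int i;

		for (i = 0; i < 3; i++) {           /* hand out three contexts */
			cache = ((cache + 1) & NR_MASK) | (cache & ~NR_MASK);
			printf("ctx %#lx (version %lu, nr %lu)\n",
			       cache, cache >> NR_BITS, cache & NR_MASK);
		}
		cache = (cache & ~NR_MASK) + VERSION_INC;  /* wrap: bump version */
		printf("after wrap: version %lu\n", cache >> NR_BITS);
		return 0;
	}
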
658 | |||
659 | static int numa_enabled = 1; | ||
660 | static int numa_debug; | ||
661 | |||
662 | static int __init early_numa(char *p) | ||
663 | { | ||
664 | if (!p) | ||
665 | return 0; | ||
666 | |||
667 | if (strstr(p, "off")) | ||
668 | numa_enabled = 0; | ||
669 | |||
670 | if (strstr(p, "debug")) | ||
671 | numa_debug = 1; | ||
672 | |||
673 | return 0; | ||
674 | } | ||
675 | early_param("numa", early_numa); | ||
676 | |||
677 | #define numadbg(f, a...) \ | ||
678 | do { if (numa_debug) \ | ||
679 | printk(KERN_INFO f, ## a); \ | ||
680 | } while (0) | ||
681 | |||
682 | static void __init find_ramdisk(unsigned long phys_base) | ||
683 | { | ||
684 | #ifdef CONFIG_BLK_DEV_INITRD | ||
685 | if (sparc_ramdisk_image || sparc_ramdisk_image64) { | ||
686 | unsigned long ramdisk_image; | ||
687 | |||
688 | /* Older versions of the bootloader only supported a | ||
689 | * 32-bit physical address for the ramdisk image | ||
690 | * location, stored at sparc_ramdisk_image. Newer | ||
691 | * SILO versions set sparc_ramdisk_image to zero and | ||
692 | * provide a full 64-bit physical address at | ||
693 | * sparc_ramdisk_image64. | ||
694 | */ | ||
695 | ramdisk_image = sparc_ramdisk_image; | ||
696 | if (!ramdisk_image) | ||
697 | ramdisk_image = sparc_ramdisk_image64; | ||
698 | |||
699 | /* Another bootloader quirk. The bootloader normalizes | ||
700 | * the physical address to KERNBASE, so we have to | ||
701 | * factor that back out and add in the lowest valid | ||
702 | * physical page address to get the true physical address. | ||
703 | */ | ||
704 | ramdisk_image -= KERNBASE; | ||
705 | ramdisk_image += phys_base; | ||
706 | |||
707 | numadbg("Found ramdisk at physical address 0x%lx, size %u\n", | ||
708 | ramdisk_image, sparc_ramdisk_size); | ||
709 | |||
710 | initrd_start = ramdisk_image; | ||
711 | initrd_end = ramdisk_image + sparc_ramdisk_size; | ||
712 | |||
713 | lmb_reserve(initrd_start, sparc_ramdisk_size); | ||
714 | |||
715 | initrd_start += PAGE_OFFSET; | ||
716 | initrd_end += PAGE_OFFSET; | ||
717 | } | ||
718 | #endif | ||
719 | } | ||
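
Concretely: with an illustrative KERNBASE of 0x400000 and the lowest valid physical page at 0xf0000000, a bootloader-reported image address of 0x800000 denormalizes to 0x800000 - 0x400000 + 0xf0000000 = 0xf0400000. The same arithmetic as a tiny sketch (all constants are placeholders, not sparc64's real layout):

	#include <stdio.h>

	int main(void)
	{
		unsigned long kernbase   = 0x400000UL;    /* placeholder */
		unsigned long phys_base  = 0xf0000000UL;  /* placeholder */
		unsigned long reported   = 0x800000UL;    /* what SILO stored */
		unsigned long ramdisk_pa = reported - kernbase + phys_base;

		printf("true ramdisk physical address: %#lx\n", ramdisk_pa);
		return 0;
	}
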
720 | |||
721 | struct node_mem_mask { | ||
722 | unsigned long mask; | ||
723 | unsigned long val; | ||
724 | unsigned long bootmem_paddr; | ||
725 | }; | ||
726 | static struct node_mem_mask node_masks[MAX_NUMNODES]; | ||
727 | static int num_node_masks; | ||
728 | |||
729 | int numa_cpu_lookup_table[NR_CPUS]; | ||
730 | cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES]; | ||
731 | |||
732 | #ifdef CONFIG_NEED_MULTIPLE_NODES | ||
733 | |||
734 | struct mdesc_mblock { | ||
735 | u64 base; | ||
736 | u64 size; | ||
737 | u64 offset; /* RA-to-PA */ | ||
738 | }; | ||
739 | static struct mdesc_mblock *mblocks; | ||
740 | static int num_mblocks; | ||
741 | |||
742 | static unsigned long ra_to_pa(unsigned long addr) | ||
743 | { | ||
744 | int i; | ||
745 | |||
746 | for (i = 0; i < num_mblocks; i++) { | ||
747 | struct mdesc_mblock *m = &mblocks[i]; | ||
748 | |||
749 | if (addr >= m->base && | ||
750 | addr < (m->base + m->size)) { | ||
751 | addr += m->offset; | ||
752 | break; | ||
753 | } | ||
754 | } | ||
755 | return addr; | ||
756 | } | ||
757 | |||
758 | static int find_node(unsigned long addr) | ||
759 | { | ||
760 | int i; | ||
761 | |||
762 | addr = ra_to_pa(addr); | ||
763 | for (i = 0; i < num_node_masks; i++) { | ||
764 | struct node_mem_mask *p = &node_masks[i]; | ||
765 | |||
766 | if ((addr & p->mask) == p->val) | ||
767 | return i; | ||
768 | } | ||
769 | return -1; | ||
770 | } | ||
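
Node membership is a pure mask-and-compare: after the RA-to-PA translation, an address belongs to node i exactly when (addr & mask) == val. A self-contained sketch with two fabricated nodes split on bit 36 of the physical address:

	#include <stdio.h>

	struct node_mem_mask { unsigned long mask, val; };

	/* Two fabricated nodes distinguished by physical address bit 36. */
	static struct node_mem_mask masks[] = {
		{ .mask = 1UL << 36, .val = 0UL },
		{ .mask = 1UL << 36, .val = 1UL << 36 },
	};

	static int find_node(unsigned long addr)
	{
		unsigned int i;

		for (i = 0; i < 2; i++)
			if ((addr & masks[i].mask) == masks[i].val)
				return i;
		return -1;
	}

	int main(void)
	{
		unsigned long a = 0x12345000UL, b = (1UL << 36) + 0x1000UL;

		printf("%#lx -> node %d\n", a, find_node(a));  /* node 0 */
		printf("%#lx -> node %d\n", b, find_node(b));  /* node 1 */
		return 0;
	}
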
771 | |||
772 | static unsigned long nid_range(unsigned long start, unsigned long end, | ||
773 | int *nid) | ||
774 | { | ||
775 | *nid = find_node(start); | ||
776 | start += PAGE_SIZE; | ||
777 | while (start < end) { | ||
778 | int n = find_node(start); | ||
779 | |||
780 | if (n != *nid) | ||
781 | break; | ||
782 | start += PAGE_SIZE; | ||
783 | } | ||
784 | |||
785 | if (start > end) | ||
786 | start = end; | ||
787 | |||
788 | return start; | ||
789 | } | ||
790 | #else | ||
791 | static unsigned long nid_range(unsigned long start, unsigned long end, | ||
792 | int *nid) | ||
793 | { | ||
794 | *nid = 0; | ||
795 | return end; | ||
796 | } | ||
797 | #endif | ||
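
nid_range() advances one base page at a time and stops at the first page whose node differs from the starting page, letting callers chop an [start, end) physical interval into per-node runs. A self-contained sketch of that chopping, with a fabricated find_node() that flips nodes on address bit 16 so the runs stay short (PAGE_SIZE taken as sparc64's 8K base page):

	#include <stdio.h>

	#define PAGE_SIZE 8192UL  /* sparc64 base page size */

	/* Fabricated layout: the node flips on address bit 16. */
	static int find_node(unsigned long addr) { return (addr >> 16) & 1; }

	static unsigned long nid_range(unsigned long start, unsigned long end,
				       int *nid)
	{
		*nid = find_node(start);
		start += PAGE_SIZE;
		while (start < end && find_node(start) == *nid)
			start += PAGE_SIZE;
		return start < end ? start : end;
	}

	int main(void)
	{
		unsigned long start = 0, end = 0x30000;
		int nid;

		while (start < end) {
			unsigned long this_end = nid_range(start, end, &nid);
			printf("[%#lx, %#lx) -> node %d\n", start, this_end, nid);
			start = this_end;
		}
		return 0;
	}
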
798 | |||
799 | /* This must be invoked after performing all of the necessary | ||
800 | * add_active_range() calls for 'nid'. We need to be able to get | ||
801 | * correct data from get_pfn_range_for_nid(). | ||
802 | */ | ||
803 | static void __init allocate_node_data(int nid) | ||
804 | { | ||
805 | unsigned long paddr, num_pages, start_pfn, end_pfn; | ||
806 | struct pglist_data *p; | ||
807 | |||
808 | #ifdef CONFIG_NEED_MULTIPLE_NODES | ||
809 | paddr = lmb_alloc_nid(sizeof(struct pglist_data), | ||
810 | SMP_CACHE_BYTES, nid, nid_range); | ||
811 | if (!paddr) { | ||
812 | prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid); | ||
813 | prom_halt(); | ||
814 | } | ||
815 | NODE_DATA(nid) = __va(paddr); | ||
816 | memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); | ||
817 | |||
818 | NODE_DATA(nid)->bdata = &bootmem_node_data[nid]; | ||
819 | #endif | ||
820 | |||
821 | p = NODE_DATA(nid); | ||
822 | |||
823 | get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); | ||
824 | p->node_start_pfn = start_pfn; | ||
825 | p->node_spanned_pages = end_pfn - start_pfn; | ||
826 | |||
827 | if (p->node_spanned_pages) { | ||
828 | num_pages = bootmem_bootmap_pages(p->node_spanned_pages); | ||
829 | |||
830 | paddr = lmb_alloc_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid, | ||
831 | nid_range); | ||
832 | if (!paddr) { | ||
833 | prom_printf("Cannot allocate bootmap for nid[%d]\n", | ||
834 | nid); | ||
835 | prom_halt(); | ||
836 | } | ||
837 | node_masks[nid].bootmem_paddr = paddr; | ||
838 | } | ||
839 | } | ||
840 | |||
841 | static void init_node_masks_nonnuma(void) | ||
842 | { | ||
843 | int i; | ||
844 | |||
845 | numadbg("Initializing tables for non-numa.\n"); | ||
846 | |||
847 | node_masks[0].mask = node_masks[0].val = 0; | ||
848 | num_node_masks = 1; | ||
849 | |||
850 | for (i = 0; i < NR_CPUS; i++) | ||
851 | numa_cpu_lookup_table[i] = 0; | ||
852 | |||
853 | numa_cpumask_lookup_table[0] = CPU_MASK_ALL; | ||
854 | } | ||
855 | |||
856 | #ifdef CONFIG_NEED_MULTIPLE_NODES | ||
857 | struct pglist_data *node_data[MAX_NUMNODES]; | ||
858 | |||
859 | EXPORT_SYMBOL(numa_cpu_lookup_table); | ||
860 | EXPORT_SYMBOL(numa_cpumask_lookup_table); | ||
861 | EXPORT_SYMBOL(node_data); | ||
862 | |||
863 | struct mdesc_mlgroup { | ||
864 | u64 node; | ||
865 | u64 latency; | ||
866 | u64 match; | ||
867 | u64 mask; | ||
868 | }; | ||
869 | static struct mdesc_mlgroup *mlgroups; | ||
870 | static int num_mlgroups; | ||
871 | |||
872 | static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio, | ||
873 | u32 cfg_handle) | ||
874 | { | ||
875 | u64 arc; | ||
876 | |||
877 | mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) { | ||
878 | u64 target = mdesc_arc_target(md, arc); | ||
879 | const u64 *val; | ||
880 | |||
881 | val = mdesc_get_property(md, target, | ||
882 | "cfg-handle", NULL); | ||
883 | if (val && *val == cfg_handle) | ||
884 | return 0; | ||
885 | } | ||
886 | return -ENODEV; | ||
887 | } | ||
888 | |||
889 | static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp, | ||
890 | u32 cfg_handle) | ||
891 | { | ||
892 | u64 arc, candidate, best_latency = ~(u64)0; | ||
893 | |||
894 | candidate = MDESC_NODE_NULL; | ||
895 | mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) { | ||
896 | u64 target = mdesc_arc_target(md, arc); | ||
897 | const char *name = mdesc_node_name(md, target); | ||
898 | const u64 *val; | ||
899 | |||
900 | if (strcmp(name, "pio-latency-group")) | ||
901 | continue; | ||
902 | |||
903 | val = mdesc_get_property(md, target, "latency", NULL); | ||
904 | if (!val) | ||
905 | continue; | ||
906 | |||
907 | if (*val < best_latency) { | ||
908 | candidate = target; | ||
909 | best_latency = *val; | ||
910 | } | ||
911 | } | ||
912 | |||
913 | if (candidate == MDESC_NODE_NULL) | ||
914 | return -ENODEV; | ||
915 | |||
916 | return scan_pio_for_cfg_handle(md, candidate, cfg_handle); | ||
917 | } | ||
918 | |||
919 | int of_node_to_nid(struct device_node *dp) | ||
920 | { | ||
921 | const struct linux_prom64_registers *regs; | ||
922 | struct mdesc_handle *md; | ||
923 | u32 cfg_handle; | ||
924 | int count, nid; | ||
925 | u64 grp; | ||
926 | |||
927 | /* This is the right thing to do on currently supported | ||
928 | * SUN4U NUMA platforms as well, as the PCI controller does | ||
929 | * not sit behind any particular memory controller. | ||
930 | */ | ||
931 | if (!mlgroups) | ||
932 | return -1; | ||
933 | |||
934 | regs = of_get_property(dp, "reg", NULL); | ||
935 | if (!regs) | ||
936 | return -1; | ||
937 | |||
938 | cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff; | ||
939 | |||
940 | md = mdesc_grab(); | ||
941 | |||
942 | count = 0; | ||
943 | nid = -1; | ||
944 | mdesc_for_each_node_by_name(md, grp, "group") { | ||
945 | if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) { | ||
946 | nid = count; | ||
947 | break; | ||
948 | } | ||
949 | count++; | ||
950 | } | ||
951 | |||
952 | mdesc_release(md); | ||
953 | |||
954 | return nid; | ||
955 | } | ||
956 | |||
957 | static void add_node_ranges(void) | ||
958 | { | ||
959 | int i; | ||
960 | |||
961 | for (i = 0; i < lmb.memory.cnt; i++) { | ||
962 | unsigned long size = lmb_size_bytes(&lmb.memory, i); | ||
963 | unsigned long start, end; | ||
964 | |||
965 | start = lmb.memory.region[i].base; | ||
966 | end = start + size; | ||
967 | while (start < end) { | ||
968 | unsigned long this_end; | ||
969 | int nid; | ||
970 | |||
971 | this_end = nid_range(start, end, &nid); | ||
972 | |||
973 | numadbg("Adding active range nid[%d] " | ||
974 | "start[%lx] end[%lx]\n", | ||
975 | nid, start, this_end); | ||
976 | |||
977 | add_active_range(nid, | ||
978 | start >> PAGE_SHIFT, | ||
979 | this_end >> PAGE_SHIFT); | ||
980 | |||
981 | start = this_end; | ||
982 | } | ||
983 | } | ||
984 | } | ||
985 | |||
986 | static int __init grab_mlgroups(struct mdesc_handle *md) | ||
987 | { | ||
988 | unsigned long paddr; | ||
989 | int count = 0; | ||
990 | u64 node; | ||
991 | |||
992 | mdesc_for_each_node_by_name(md, node, "memory-latency-group") | ||
993 | count++; | ||
994 | if (!count) | ||
995 | return -ENOENT; | ||
996 | |||
997 | paddr = lmb_alloc(count * sizeof(struct mdesc_mlgroup), | ||
998 | SMP_CACHE_BYTES); | ||
999 | if (!paddr) | ||
1000 | return -ENOMEM; | ||
1001 | |||
1002 | mlgroups = __va(paddr); | ||
1003 | num_mlgroups = count; | ||
1004 | |||
1005 | count = 0; | ||
1006 | mdesc_for_each_node_by_name(md, node, "memory-latency-group") { | ||
1007 | struct mdesc_mlgroup *m = &mlgroups[count++]; | ||
1008 | const u64 *val; | ||
1009 | |||
1010 | m->node = node; | ||
1011 | |||
1012 | val = mdesc_get_property(md, node, "latency", NULL); | ||
1013 | m->latency = *val; | ||
1014 | val = mdesc_get_property(md, node, "address-match", NULL); | ||
1015 | m->match = *val; | ||
1016 | val = mdesc_get_property(md, node, "address-mask", NULL); | ||
1017 | m->mask = *val; | ||
1018 | |||
1019 | numadbg("MLGROUP[%d]: node[%lx] latency[%lx] " | ||
1020 | "match[%lx] mask[%lx]\n", | ||
1021 | count - 1, m->node, m->latency, m->match, m->mask); | ||
1022 | } | ||
1023 | |||
1024 | return 0; | ||
1025 | } | ||
1026 | |||
1027 | static int __init grab_mblocks(struct mdesc_handle *md) | ||
1028 | { | ||
1029 | unsigned long paddr; | ||
1030 | int count = 0; | ||
1031 | u64 node; | ||
1032 | |||
1033 | mdesc_for_each_node_by_name(md, node, "mblock") | ||
1034 | count++; | ||
1035 | if (!count) | ||
1036 | return -ENOENT; | ||
1037 | |||
1038 | paddr = lmb_alloc(count * sizeof(struct mdesc_mblock), | ||
1039 | SMP_CACHE_BYTES); | ||
1040 | if (!paddr) | ||
1041 | return -ENOMEM; | ||
1042 | |||
1043 | mblocks = __va(paddr); | ||
1044 | num_mblocks = count; | ||
1045 | |||
1046 | count = 0; | ||
1047 | mdesc_for_each_node_by_name(md, node, "mblock") { | ||
1048 | struct mdesc_mblock *m = &mblocks[count++]; | ||
1049 | const u64 *val; | ||
1050 | |||
1051 | val = mdesc_get_property(md, node, "base", NULL); | ||
1052 | m->base = *val; | ||
1053 | val = mdesc_get_property(md, node, "size", NULL); | ||
1054 | m->size = *val; | ||
1055 | val = mdesc_get_property(md, node, | ||
1056 | "address-congruence-offset", NULL); | ||
1057 | m->offset = *val; | ||
1058 | |||
1059 | numadbg("MBLOCK[%d]: base[%lx] size[%lx] offset[%lx]\n", | ||
1060 | count - 1, m->base, m->size, m->offset); | ||
1061 | } | ||
1062 | |||
1063 | return 0; | ||
1064 | } | ||
1065 | |||
1066 | static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md, | ||
1067 | u64 grp, cpumask_t *mask) | ||
1068 | { | ||
1069 | u64 arc; | ||
1070 | |||
1071 | cpus_clear(*mask); | ||
1072 | |||
1073 | mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) { | ||
1074 | u64 target = mdesc_arc_target(md, arc); | ||
1075 | const char *name = mdesc_node_name(md, target); | ||
1076 | const u64 *id; | ||
1077 | |||
1078 | if (strcmp(name, "cpu")) | ||
1079 | continue; | ||
1080 | id = mdesc_get_property(md, target, "id", NULL); | ||
1081 | if (*id < NR_CPUS) | ||
1082 | cpu_set(*id, *mask); | ||
1083 | } | ||
1084 | } | ||
1085 | |||
1086 | static struct mdesc_mlgroup * __init find_mlgroup(u64 node) | ||
1087 | { | ||
1088 | int i; | ||
1089 | |||
1090 | for (i = 0; i < num_mlgroups; i++) { | ||
1091 | struct mdesc_mlgroup *m = &mlgroups[i]; | ||
1092 | if (m->node == node) | ||
1093 | return m; | ||
1094 | } | ||
1095 | return NULL; | ||
1096 | } | ||
1097 | |||
1098 | static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp, | ||
1099 | int index) | ||
1100 | { | ||
1101 | struct mdesc_mlgroup *candidate = NULL; | ||
1102 | u64 arc, best_latency = ~(u64)0; | ||
1103 | struct node_mem_mask *n; | ||
1104 | |||
1105 | mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) { | ||
1106 | u64 target = mdesc_arc_target(md, arc); | ||
1107 | struct mdesc_mlgroup *m = find_mlgroup(target); | ||
1108 | if (!m) | ||
1109 | continue; | ||
1110 | if (m->latency < best_latency) { | ||
1111 | candidate = m; | ||
1112 | best_latency = m->latency; | ||
1113 | } | ||
1114 | } | ||
1115 | if (!candidate) | ||
1116 | return -ENOENT; | ||
1117 | |||
1118 | if (num_node_masks != index) { | ||
1119 | printk(KERN_ERR "Inconsistent NUMA state, " | ||
1120 | "index[%d] != num_node_masks[%d]\n", | ||
1121 | index, num_node_masks); | ||
1122 | return -EINVAL; | ||
1123 | } | ||
1124 | |||
1125 | n = &node_masks[num_node_masks++]; | ||
1126 | |||
1127 | n->mask = candidate->mask; | ||
1128 | n->val = candidate->match; | ||
1129 | |||
1130 | numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%lx])\n", | ||
1131 | index, n->mask, n->val, candidate->latency); | ||
1132 | |||
1133 | return 0; | ||
1134 | } | ||
1135 | |||
1136 | static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp, | ||
1137 | int index) | ||
1138 | { | ||
1139 | cpumask_t mask; | ||
1140 | int cpu; | ||
1141 | |||
1142 | numa_parse_mdesc_group_cpus(md, grp, &mask); | ||
1143 | |||
1144 | for_each_cpu_mask(cpu, mask) | ||
1145 | numa_cpu_lookup_table[cpu] = index; | ||
1146 | numa_cpumask_lookup_table[index] = mask; | ||
1147 | |||
1148 | if (numa_debug) { | ||
1149 | printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index); | ||
1150 | for_each_cpu_mask(cpu, mask) | ||
1151 | printk("%d ", cpu); | ||
1152 | printk("]\n"); | ||
1153 | } | ||
1154 | |||
1155 | return numa_attach_mlgroup(md, grp, index); | ||
1156 | } | ||
1157 | |||
1158 | static int __init numa_parse_mdesc(void) | ||
1159 | { | ||
1160 | struct mdesc_handle *md = mdesc_grab(); | ||
1161 | int i, err, count; | ||
1162 | u64 node; | ||
1163 | |||
1164 | node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups"); | ||
1165 | if (node == MDESC_NODE_NULL) { | ||
1166 | mdesc_release(md); | ||
1167 | return -ENOENT; | ||
1168 | } | ||
1169 | |||
1170 | err = grab_mblocks(md); | ||
1171 | if (err < 0) | ||
1172 | goto out; | ||
1173 | |||
1174 | err = grab_mlgroups(md); | ||
1175 | if (err < 0) | ||
1176 | goto out; | ||
1177 | |||
1178 | count = 0; | ||
1179 | mdesc_for_each_node_by_name(md, node, "group") { | ||
1180 | err = numa_parse_mdesc_group(md, node, count); | ||
1181 | if (err < 0) | ||
1182 | break; | ||
1183 | count++; | ||
1184 | } | ||
1185 | |||
1186 | add_node_ranges(); | ||
1187 | |||
1188 | for (i = 0; i < num_node_masks; i++) { | ||
1189 | allocate_node_data(i); | ||
1190 | node_set_online(i); | ||
1191 | } | ||
1192 | |||
1193 | err = 0; | ||
1194 | out: | ||
1195 | mdesc_release(md); | ||
1196 | return err; | ||
1197 | } | ||
1198 | |||
1199 | static int __init numa_parse_jbus(void) | ||
1200 | { | ||
1201 | unsigned long cpu, index; | ||
1202 | |||
1203 | /* NUMA node id is encoded in bits 36 and higher, and there is | ||
1204 | * a 1-to-1 mapping from CPU ID to NUMA node ID. | ||
1205 | */ | ||
1206 | index = 0; | ||
1207 | for_each_present_cpu(cpu) { | ||
1208 | numa_cpu_lookup_table[cpu] = index; | ||
1209 | numa_cpumask_lookup_table[index] = cpumask_of_cpu(cpu); | ||
1210 | node_masks[index].mask = ~((1UL << 36UL) - 1UL); | ||
1211 | node_masks[index].val = cpu << 36UL; | ||
1212 | |||
1213 | index++; | ||
1214 | } | ||
1215 | num_node_masks = index; | ||
1216 | |||
1217 | add_node_ranges(); | ||
1218 | |||
1219 | for (index = 0; index < num_node_masks; index++) { | ||
1220 | allocate_node_data(index); | ||
1221 | node_set_online(index); | ||
1222 | } | ||
1223 | |||
1224 | return 0; | ||
1225 | } | ||
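
On these JBUS parts the node really is just the CPU id shifted into bits 36 and up: mask = ~((1UL << 36) - 1) keeps only those bits and val = cpu << 36, so any physical address in [cpu << 36, (cpu + 1) << 36) matches that CPU's node. A quick check of the arithmetic:

	#include <stdio.h>

	int main(void)
	{
		unsigned long mask = ~((1UL << 36) - 1UL);
		unsigned long cpu  = 2;
		unsigned long val  = cpu << 36;
		unsigned long addr = (2UL << 36) + 0x123456UL; /* in cpu 2's window */

		printf("match: %d\n", (addr & mask) == val);   /* prints 1 */
		return 0;
	}
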
1226 | |||
1227 | static int __init numa_parse_sun4u(void) | ||
1228 | { | ||
1229 | if (tlb_type == cheetah || tlb_type == cheetah_plus) { | ||
1230 | unsigned long ver; | ||
1231 | |||
1232 | __asm__ ("rdpr %%ver, %0" : "=r" (ver)); | ||
1233 | if ((ver >> 32UL) == __JALAPENO_ID || | ||
1234 | (ver >> 32UL) == __SERRANO_ID) | ||
1235 | return numa_parse_jbus(); | ||
1236 | } | ||
1237 | return -1; | ||
1238 | } | ||
1239 | |||
1240 | static int __init bootmem_init_numa(void) | ||
1241 | { | ||
1242 | int err = -1; | ||
1243 | |||
1244 | numadbg("bootmem_init_numa()\n"); | ||
1245 | |||
1246 | if (numa_enabled) { | ||
1247 | if (tlb_type == hypervisor) | ||
1248 | err = numa_parse_mdesc(); | ||
1249 | else | ||
1250 | err = numa_parse_sun4u(); | ||
1251 | } | ||
1252 | return err; | ||
1253 | } | ||
1254 | |||
1255 | #else | ||
1256 | |||
1257 | static int bootmem_init_numa(void) | ||
1258 | { | ||
1259 | return -1; | ||
1260 | } | ||
1261 | |||
1262 | #endif | ||
1263 | |||
1264 | static void __init bootmem_init_nonnuma(void) | ||
1265 | { | ||
1266 | unsigned long top_of_ram = lmb_end_of_DRAM(); | ||
1267 | unsigned long total_ram = lmb_phys_mem_size(); | ||
1268 | unsigned int i; | ||
1269 | |||
1270 | numadbg("bootmem_init_nonnuma()\n"); | ||
1271 | |||
1272 | printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n", | ||
1273 | top_of_ram, total_ram); | ||
1274 | printk(KERN_INFO "Memory hole size: %ldMB\n", | ||
1275 | (top_of_ram - total_ram) >> 20); | ||
1276 | |||
1277 | init_node_masks_nonnuma(); | ||
1278 | |||
1279 | for (i = 0; i < lmb.memory.cnt; i++) { | ||
1280 | unsigned long size = lmb_size_bytes(&lmb.memory, i); | ||
1281 | unsigned long start_pfn, end_pfn; | ||
1282 | |||
1283 | if (!size) | ||
1284 | continue; | ||
1285 | |||
1286 | start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT; | ||
1287 | end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i); | ||
1288 | add_active_range(0, start_pfn, end_pfn); | ||
1289 | } | ||
1290 | |||
1291 | allocate_node_data(0); | ||
1292 | |||
1293 | node_set_online(0); | ||
1294 | } | ||
1295 | |||
1296 | static void __init reserve_range_in_node(int nid, unsigned long start, | ||
1297 | unsigned long end) | ||
1298 | { | ||
1299 | numadbg(" reserve_range_in_node(nid[%d],start[%lx],end[%lx])\n", | ||
1300 | nid, start, end); | ||
1301 | while (start < end) { | ||
1302 | unsigned long this_end; | ||
1303 | int n; | ||
1304 | |||
1305 | this_end = nid_range(start, end, &n); | ||
1306 | if (n == nid) { | ||
1307 | numadbg(" MATCH reserving range [%lx:%lx]\n", | ||
1308 | start, this_end); | ||
1309 | reserve_bootmem_node(NODE_DATA(nid), start, | ||
1310 | (this_end - start), BOOTMEM_DEFAULT); | ||
1311 | } else | ||
1312 | numadbg(" NO MATCH, advancing start to %lx\n", | ||
1313 | this_end); | ||
1314 | |||
1315 | start = this_end; | ||
1316 | } | ||
1317 | } | ||
1318 | |||
1319 | static void __init trim_reserved_in_node(int nid) | ||
1320 | { | ||
1321 | int i; | ||
1322 | |||
1323 | numadbg(" trim_reserved_in_node(%d)\n", nid); | ||
1324 | |||
1325 | for (i = 0; i < lmb.reserved.cnt; i++) { | ||
1326 | unsigned long start = lmb.reserved.region[i].base; | ||
1327 | unsigned long size = lmb_size_bytes(&lmb.reserved, i); | ||
1328 | unsigned long end = start + size; | ||
1329 | |||
1330 | reserve_range_in_node(nid, start, end); | ||
1331 | } | ||
1332 | } | ||
1333 | |||
1334 | static void __init bootmem_init_one_node(int nid) | ||
1335 | { | ||
1336 | struct pglist_data *p; | ||
1337 | |||
1338 | numadbg("bootmem_init_one_node(%d)\n", nid); | ||
1339 | |||
1340 | p = NODE_DATA(nid); | ||
1341 | |||
1342 | if (p->node_spanned_pages) { | ||
1343 | unsigned long paddr = node_masks[nid].bootmem_paddr; | ||
1344 | unsigned long end_pfn; | ||
1345 | |||
1346 | end_pfn = p->node_start_pfn + p->node_spanned_pages; | ||
1347 | |||
1348 | numadbg(" init_bootmem_node(%d, %lx, %lx, %lx)\n", | ||
1349 | nid, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn); | ||
1350 | |||
1351 | init_bootmem_node(p, paddr >> PAGE_SHIFT, | ||
1352 | p->node_start_pfn, end_pfn); | ||
1353 | |||
1354 | numadbg(" free_bootmem_with_active_regions(%d, %lx)\n", | ||
1355 | nid, end_pfn); | ||
1356 | free_bootmem_with_active_regions(nid, end_pfn); | ||
1357 | |||
1358 | trim_reserved_in_node(nid); | ||
1359 | |||
1360 | numadbg(" sparse_memory_present_with_active_regions(%d)\n", | ||
1361 | nid); | ||
1362 | sparse_memory_present_with_active_regions(nid); | ||
1363 | } | ||
1364 | } | ||
1365 | |||
1366 | static unsigned long __init bootmem_init(unsigned long phys_base) | ||
1367 | { | ||
1368 | unsigned long end_pfn; | ||
1369 | int nid; | ||
1370 | |||
1371 | end_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT; | ||
1372 | max_pfn = max_low_pfn = end_pfn; | ||
1373 | min_low_pfn = (phys_base >> PAGE_SHIFT); | ||
1374 | |||
1375 | if (bootmem_init_numa() < 0) | ||
1376 | bootmem_init_nonnuma(); | ||
1377 | |||
1378 | /* XXX cpu notifier XXX */ | ||
1379 | |||
1380 | for_each_online_node(nid) | ||
1381 | bootmem_init_one_node(nid); | ||
1382 | |||
1383 | sparse_init(); | ||
1384 | |||
1385 | return end_pfn; | ||
1386 | } | ||
1387 | |||
1388 | static struct linux_prom64_registers pall[MAX_BANKS] __initdata; | ||
1389 | static int pall_ents __initdata; | ||
1390 | |||
1391 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
1392 | static unsigned long __ref kernel_map_range(unsigned long pstart, | ||
1393 | unsigned long pend, pgprot_t prot) | ||
1394 | { | ||
1395 | unsigned long vstart = PAGE_OFFSET + pstart; | ||
1396 | unsigned long vend = PAGE_OFFSET + pend; | ||
1397 | unsigned long alloc_bytes = 0UL; | ||
1398 | |||
1399 | if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) { | ||
1400 | prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n", | ||
1401 | vstart, vend); | ||
1402 | prom_halt(); | ||
1403 | } | ||
1404 | |||
1405 | while (vstart < vend) { | ||
1406 | unsigned long this_end, paddr = __pa(vstart); | ||
1407 | pgd_t *pgd = pgd_offset_k(vstart); | ||
1408 | pud_t *pud; | ||
1409 | pmd_t *pmd; | ||
1410 | pte_t *pte; | ||
1411 | |||
1412 | pud = pud_offset(pgd, vstart); | ||
1413 | if (pud_none(*pud)) { | ||
1414 | pmd_t *new; | ||
1415 | |||
1416 | new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); | ||
1417 | alloc_bytes += PAGE_SIZE; | ||
1418 | pud_populate(&init_mm, pud, new); | ||
1419 | } | ||
1420 | |||
1421 | pmd = pmd_offset(pud, vstart); | ||
1422 | if (!pmd_present(*pmd)) { | ||
1423 | pte_t *new; | ||
1424 | |||
1425 | new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); | ||
1426 | alloc_bytes += PAGE_SIZE; | ||
1427 | pmd_populate_kernel(&init_mm, pmd, new); | ||
1428 | } | ||
1429 | |||
1430 | pte = pte_offset_kernel(pmd, vstart); | ||
1431 | this_end = (vstart + PMD_SIZE) & PMD_MASK; | ||
1432 | if (this_end > vend) | ||
1433 | this_end = vend; | ||
1434 | |||
1435 | while (vstart < this_end) { | ||
1436 | pte_val(*pte) = (paddr | pgprot_val(prot)); | ||
1437 | |||
1438 | vstart += PAGE_SIZE; | ||
1439 | paddr += PAGE_SIZE; | ||
1440 | pte++; | ||
1441 | } | ||
1442 | } | ||
1443 | |||
1444 | return alloc_bytes; | ||
1445 | } | ||
1446 | |||
1447 | extern unsigned int kvmap_linear_patch[1]; | ||
1448 | #endif /* CONFIG_DEBUG_PAGEALLOC */ | ||
1449 | |||
1450 | static void __init mark_kpte_bitmap(unsigned long start, unsigned long end) | ||
1451 | { | ||
1452 | const unsigned long shift_256MB = 28; | ||
1453 | const unsigned long mask_256MB = ((1UL << shift_256MB) - 1UL); | ||
1454 | const unsigned long size_256MB = (1UL << shift_256MB); | ||
1455 | |||
1456 | while (start < end) { | ||
1457 | long remains; | ||
1458 | |||
1459 | remains = end - start; | ||
1460 | if (remains < size_256MB) | ||
1461 | break; | ||
1462 | |||
1463 | if (start & mask_256MB) { | ||
1464 | start = (start + size_256MB) & ~mask_256MB; | ||
1465 | continue; | ||
1466 | } | ||
1467 | |||
1468 | while (remains >= size_256MB) { | ||
1469 | unsigned long index = start >> shift_256MB; | ||
1470 | |||
1471 | __set_bit(index, kpte_linear_bitmap); | ||
1472 | |||
1473 | start += size_256MB; | ||
1474 | remains -= size_256MB; | ||
1475 | } | ||
1476 | } | ||
1477 | } | ||
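
One bit of kpte_linear_bitmap stands for one naturally aligned 256MB chunk of physical memory, so an address maps to bit paddr >> 28, and only spans containing at least one fully aligned 256MB chunk get marked. For instance (illustrative address):

	#include <stdio.h>

	int main(void)
	{
		const unsigned long shift_256MB = 28;
		unsigned long paddr = 0x30000000UL;  /* 768MB, chunk-aligned */

		/* 0x30000000 >> 28 == 3: the fourth 256MB chunk. */
		printf("paddr %#lx -> bitmap bit %lu\n", paddr, paddr >> shift_256MB);
		return 0;
	}
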
1478 | |||
1479 | static void __init init_kpte_bitmap(void) | ||
1480 | { | ||
1481 | unsigned long i; | ||
1482 | |||
1483 | for (i = 0; i < pall_ents; i++) { | ||
1484 | unsigned long phys_start, phys_end; | ||
1485 | |||
1486 | phys_start = pall[i].phys_addr; | ||
1487 | phys_end = phys_start + pall[i].reg_size; | ||
1488 | |||
1489 | mark_kpte_bitmap(phys_start, phys_end); | ||
1490 | } | ||
1491 | } | ||
1492 | |||
1493 | static void __init kernel_physical_mapping_init(void) | ||
1494 | { | ||
1495 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
1496 | unsigned long i, mem_alloced = 0UL; | ||
1497 | |||
1498 | for (i = 0; i < pall_ents; i++) { | ||
1499 | unsigned long phys_start, phys_end; | ||
1500 | |||
1501 | phys_start = pall[i].phys_addr; | ||
1502 | phys_end = phys_start + pall[i].reg_size; | ||
1503 | |||
1504 | mem_alloced += kernel_map_range(phys_start, phys_end, | ||
1505 | PAGE_KERNEL); | ||
1506 | } | ||
1507 | |||
1508 | printk("Allocated %ld bytes for kernel page tables.\n", | ||
1509 | mem_alloced); | ||
1510 | |||
1511 | kvmap_linear_patch[0] = 0x01000000; /* nop */ | ||
1512 | flushi(&kvmap_linear_patch[0]); | ||
1513 | |||
1514 | __flush_tlb_all(); | ||
1515 | #endif | ||
1516 | } | ||
1517 | |||
1518 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
1519 | void kernel_map_pages(struct page *page, int numpages, int enable) | ||
1520 | { | ||
1521 | unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT; | ||
1522 | unsigned long phys_end = phys_start + (numpages * PAGE_SIZE); | ||
1523 | |||
1524 | kernel_map_range(phys_start, phys_end, | ||
1525 | (enable ? PAGE_KERNEL : __pgprot(0))); | ||
1526 | |||
1527 | flush_tsb_kernel_range(PAGE_OFFSET + phys_start, | ||
1528 | PAGE_OFFSET + phys_end); | ||
1529 | |||
1530 | /* Ideally we would perform an IPI and flush all TLBs, | ||
1531 | * but that can deadlock, so we flush only the current cpu. | ||
1532 | */ | ||
1533 | __flush_tlb_kernel_range(PAGE_OFFSET + phys_start, | ||
1534 | PAGE_OFFSET + phys_end); | ||
1535 | } | ||
1536 | #endif | ||
1537 | |||
1538 | unsigned long __init find_ecache_flush_span(unsigned long size) | ||
1539 | { | ||
1540 | int i; | ||
1541 | |||
1542 | for (i = 0; i < pavail_ents; i++) { | ||
1543 | if (pavail[i].reg_size >= size) | ||
1544 | return pavail[i].phys_addr; | ||
1545 | } | ||
1546 | |||
1547 | return ~0UL; | ||
1548 | } | ||
1549 | |||
1550 | static void __init tsb_phys_patch(void) | ||
1551 | { | ||
1552 | struct tsb_ldquad_phys_patch_entry *pquad; | ||
1553 | struct tsb_phys_patch_entry *p; | ||
1554 | |||
1555 | pquad = &__tsb_ldquad_phys_patch; | ||
1556 | while (pquad < &__tsb_ldquad_phys_patch_end) { | ||
1557 | unsigned long addr = pquad->addr; | ||
1558 | |||
1559 | if (tlb_type == hypervisor) | ||
1560 | *(unsigned int *) addr = pquad->sun4v_insn; | ||
1561 | else | ||
1562 | *(unsigned int *) addr = pquad->sun4u_insn; | ||
1563 | wmb(); | ||
1564 | __asm__ __volatile__("flush %0" | ||
1565 | : /* no outputs */ | ||
1566 | : "r" (addr)); | ||
1567 | |||
1568 | pquad++; | ||
1569 | } | ||
1570 | |||
1571 | p = &__tsb_phys_patch; | ||
1572 | while (p < &__tsb_phys_patch_end) { | ||
1573 | unsigned long addr = p->addr; | ||
1574 | |||
1575 | *(unsigned int *) addr = p->insn; | ||
1576 | wmb(); | ||
1577 | __asm__ __volatile__("flush %0" | ||
1578 | : /* no outputs */ | ||
1579 | : "r" (addr)); | ||
1580 | |||
1581 | p++; | ||
1582 | } | ||
1583 | } | ||
1584 | |||
1585 | /* Don't mark as init, we give this to the Hypervisor. */ | ||
1586 | #ifndef CONFIG_DEBUG_PAGEALLOC | ||
1587 | #define NUM_KTSB_DESCR 2 | ||
1588 | #else | ||
1589 | #define NUM_KTSB_DESCR 1 | ||
1590 | #endif | ||
1591 | static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR]; | ||
1592 | extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; | ||
1593 | |||
1594 | static void __init sun4v_ktsb_init(void) | ||
1595 | { | ||
1596 | unsigned long ktsb_pa; | ||
1597 | |||
1598 | /* First KTSB for PAGE_SIZE mappings. */ | ||
1599 | ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE); | ||
1600 | |||
1601 | switch (PAGE_SIZE) { | ||
1602 | case 8 * 1024: | ||
1603 | default: | ||
1604 | ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K; | ||
1605 | ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K; | ||
1606 | break; | ||
1607 | |||
1608 | case 64 * 1024: | ||
1609 | ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K; | ||
1610 | ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K; | ||
1611 | break; | ||
1612 | |||
1613 | case 512 * 1024: | ||
1614 | ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K; | ||
1615 | ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K; | ||
1616 | break; | ||
1617 | |||
1618 | case 4 * 1024 * 1024: | ||
1619 | ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB; | ||
1620 | ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB; | ||
1621 | break; | ||
1622 | } | ||
1623 | |||
1624 | ktsb_descr[0].assoc = 1; | ||
1625 | ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES; | ||
1626 | ktsb_descr[0].ctx_idx = 0; | ||
1627 | ktsb_descr[0].tsb_base = ktsb_pa; | ||
1628 | ktsb_descr[0].resv = 0; | ||
1629 | |||
1630 | #ifndef CONFIG_DEBUG_PAGEALLOC | ||
1631 | /* Second KTSB for 4MB/256MB mappings. */ | ||
1632 | ktsb_pa = (kern_base + | ||
1633 | ((unsigned long)&swapper_4m_tsb[0] - KERNBASE)); | ||
1634 | |||
1635 | ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB; | ||
1636 | ktsb_descr[1].pgsz_mask = (HV_PGSZ_MASK_4MB | | ||
1637 | HV_PGSZ_MASK_256MB); | ||
1638 | ktsb_descr[1].assoc = 1; | ||
1639 | ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES; | ||
1640 | ktsb_descr[1].ctx_idx = 0; | ||
1641 | ktsb_descr[1].tsb_base = ktsb_pa; | ||
1642 | ktsb_descr[1].resv = 0; | ||
1643 | #endif | ||
1644 | } | ||
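
The hypervisor needs the TSB base as a physical address, and because the kernel image is linked at KERNBASE but loaded at physical kern_base, any kernel-image symbol translates as pa = kern_base + (va - KERNBASE), which is exactly the ktsb_pa computation above. A sketch with placeholder addresses:

	#include <stdio.h>

	int main(void)
	{
		unsigned long kernbase  = 0x400000UL;            /* placeholder link address */
		unsigned long kern_base = 0x20400000UL;          /* placeholder load address */
		unsigned long sym_va    = kernbase + 0x123000UL; /* some image symbol */
		unsigned long sym_pa    = kern_base + (sym_va - kernbase);

		printf("va %#lx -> pa %#lx\n", sym_va, sym_pa);
		return 0;
	}
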
1645 | |||
1646 | void __cpuinit sun4v_ktsb_register(void) | ||
1647 | { | ||
1648 | unsigned long pa, ret; | ||
1649 | |||
1650 | pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE); | ||
1651 | |||
1652 | ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa); | ||
1653 | if (ret != 0) { | ||
1654 | prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: " | ||
1655 | "errors with %lx\n", pa, ret); | ||
1656 | prom_halt(); | ||
1657 | } | ||
1658 | } | ||
1659 | |||
1660 | /* paging_init() sets up the page tables */ | ||
1661 | |||
1662 | static unsigned long last_valid_pfn; | ||
1663 | pgd_t swapper_pg_dir[2048]; | ||
1664 | |||
1665 | static void sun4u_pgprot_init(void); | ||
1666 | static void sun4v_pgprot_init(void); | ||
1667 | |||
1668 | /* Dummy function */ | ||
1669 | void __init setup_per_cpu_areas(void) | ||
1670 | { | ||
1671 | } | ||
1672 | |||
1673 | void __init paging_init(void) | ||
1674 | { | ||
1675 | unsigned long end_pfn, shift, phys_base; | ||
1676 | unsigned long real_end, i; | ||
1677 | |||
1678 | /* These build-time checks make sure that the dcache_dirty_cpu() | ||
1679 | * page->flags usage will work. | ||
1680 | * | ||
1681 | * When a page gets marked as dcache-dirty, we store the | ||
1682 | * cpu number starting at bit 32 in the page->flags. Also, | ||
1683 | * functions like clear_dcache_dirty_cpu use the cpu mask | ||
1684 | * in 13-bit signed-immediate instruction fields. | ||
1685 | */ | ||
1686 | |||
1687 | /* | ||
1688 | * Page flags must not reach into upper 32 bits that are used | ||
1689 | * for the cpu number | ||
1690 | */ | ||
1691 | BUILD_BUG_ON(NR_PAGEFLAGS > 32); | ||
1692 | |||
1693 | /* | ||
1694 | * The bit fields placed in the high range must not reach below | ||
1695 | * the 32 bit boundary. Otherwise we cannot place the cpu field | ||
1696 | * at the 32 bit boundary. | ||
1697 | */ | ||
1698 | BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH + | ||
1699 | ilog2(roundup_pow_of_two(NR_CPUS)) > 32); | ||
1700 | |||
1701 | BUILD_BUG_ON(NR_CPUS > 4096); | ||
1702 | |||
1703 | kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; | ||
1704 | kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; | ||
1705 | |||
1706 | /* Invalidate both kernel TSBs. */ | ||
1707 | memset(swapper_tsb, 0x40, sizeof(swapper_tsb)); | ||
1708 | #ifndef CONFIG_DEBUG_PAGEALLOC | ||
1709 | memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb)); | ||
1710 | #endif | ||
1711 | |||
1712 | if (tlb_type == hypervisor) | ||
1713 | sun4v_pgprot_init(); | ||
1714 | else | ||
1715 | sun4u_pgprot_init(); | ||
1716 | |||
1717 | if (tlb_type == cheetah_plus || | ||
1718 | tlb_type == hypervisor) | ||
1719 | tsb_phys_patch(); | ||
1720 | |||
1721 | if (tlb_type == hypervisor) { | ||
1722 | sun4v_patch_tlb_handlers(); | ||
1723 | sun4v_ktsb_init(); | ||
1724 | } | ||
1725 | |||
1726 | lmb_init(); | ||
1727 | |||
1728 | /* Find available physical memory... | ||
1729 | * | ||
1730 | * Read it twice in order to work around a bug in openfirmware. | ||
1731 | * The call to grab this table itself can cause openfirmware to | ||
1732 | * allocate memory, which in turn can take away some space from | ||
1733 | * the list of available memory. Reading it twice makes sure | ||
1734 | * we really do get the final value. | ||
1735 | */ | ||
1736 | read_obp_translations(); | ||
1737 | read_obp_memory("reg", &pall[0], &pall_ents); | ||
1738 | read_obp_memory("available", &pavail[0], &pavail_ents); | ||
1739 | read_obp_memory("available", &pavail[0], &pavail_ents); | ||
1740 | |||
1741 | phys_base = 0xffffffffffffffffUL; | ||
1742 | for (i = 0; i < pavail_ents; i++) { | ||
1743 | phys_base = min(phys_base, pavail[i].phys_addr); | ||
1744 | lmb_add(pavail[i].phys_addr, pavail[i].reg_size); | ||
1745 | } | ||
1746 | |||
1747 | lmb_reserve(kern_base, kern_size); | ||
1748 | |||
1749 | find_ramdisk(phys_base); | ||
1750 | |||
1751 | lmb_enforce_memory_limit(cmdline_memory_size); | ||
1752 | |||
1753 | lmb_analyze(); | ||
1754 | lmb_dump_all(); | ||
1755 | |||
1756 | set_bit(0, mmu_context_bmap); | ||
1757 | |||
1758 | shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE); | ||
1759 | |||
1760 | real_end = (unsigned long)_end; | ||
1761 | num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22); | ||
1762 | printk("Kernel: Using %d locked TLB entries for main kernel image.\n", | ||
1763 | num_kernel_image_mappings); | ||
1764 | |||
1765 | /* Set kernel pgd to upper alias so physical page computations | ||
1766 | * work. | ||
1767 | */ | ||
1768 | init_mm.pgd += ((shift) / (sizeof(pgd_t))); | ||
1769 | |||
1770 | memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir)); | ||
1771 | |||
1772 | /* Now can init the kernel/bad page tables. */ | ||
1773 | pud_set(pud_offset(&swapper_pg_dir[0], 0), | ||
1774 | swapper_low_pmd_dir + (shift / sizeof(pgd_t))); | ||
1775 | |||
1776 | inherit_prom_mappings(); | ||
1777 | |||
1778 | init_kpte_bitmap(); | ||
1779 | |||
1780 | /* Ok, we can use our TLB miss and window trap handlers safely. */ | ||
1781 | setup_tba(); | ||
1782 | |||
1783 | __flush_tlb_all(); | ||
1784 | |||
1785 | if (tlb_type == hypervisor) | ||
1786 | sun4v_ktsb_register(); | ||
1787 | |||
1788 | /* We must setup the per-cpu areas before we pull in the | ||
1789 | * PROM and the MDESC. The code there fills in cpu and | ||
1790 | * other information into per-cpu data structures. | ||
1791 | */ | ||
1792 | real_setup_per_cpu_areas(); | ||
1793 | |||
1794 | prom_build_devicetree(); | ||
1795 | |||
1796 | if (tlb_type == hypervisor) | ||
1797 | sun4v_mdesc_init(); | ||
1798 | |||
1799 | /* Once the OF device tree and MDESC have been setup, we know | ||
1800 | * the list of possible cpus. Therefore we can allocate the | ||
1801 | * IRQ stacks. | ||
1802 | */ | ||
1803 | for_each_possible_cpu(i) { | ||
1804 | /* XXX Use node local allocations... XXX */ | ||
1805 | softirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE)); | ||
1806 | hardirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE)); | ||
1807 | } | ||
1808 | |||
1809 | /* Setup bootmem... */ | ||
1810 | last_valid_pfn = end_pfn = bootmem_init(phys_base); | ||
1811 | |||
1812 | #ifndef CONFIG_NEED_MULTIPLE_NODES | ||
1813 | max_mapnr = last_valid_pfn; | ||
1814 | #endif | ||
1815 | kernel_physical_mapping_init(); | ||
1816 | |||
1817 | { | ||
1818 | unsigned long max_zone_pfns[MAX_NR_ZONES]; | ||
1819 | |||
1820 | memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); | ||
1821 | |||
1822 | max_zone_pfns[ZONE_NORMAL] = end_pfn; | ||
1823 | |||
1824 | free_area_init_nodes(max_zone_pfns); | ||
1825 | } | ||
1826 | |||
1827 | printk("Booting Linux...\n"); | ||
1828 | } | ||
1829 | |||
1830 | int __init page_in_phys_avail(unsigned long paddr) | ||
1831 | { | ||
1832 | int i; | ||
1833 | |||
1834 | paddr &= PAGE_MASK; | ||
1835 | |||
1836 | for (i = 0; i < pavail_ents; i++) { | ||
1837 | unsigned long start, end; | ||
1838 | |||
1839 | start = pavail[i].phys_addr; | ||
1840 | end = start + pavail[i].reg_size; | ||
1841 | |||
1842 | if (paddr >= start && paddr < end) | ||
1843 | return 1; | ||
1844 | } | ||
1845 | if (paddr >= kern_base && paddr < (kern_base + kern_size)) | ||
1846 | return 1; | ||
1847 | #ifdef CONFIG_BLK_DEV_INITRD | ||
1848 | if (paddr >= __pa(initrd_start) && | ||
1849 | paddr < __pa(PAGE_ALIGN(initrd_end))) | ||
1850 | return 1; | ||
1851 | #endif | ||
1852 | |||
1853 | return 0; | ||
1854 | } | ||
1855 | |||
1856 | static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata; | ||
1857 | static int pavail_rescan_ents __initdata; | ||
1858 | |||
1859 | /* Certain OBP calls, such as fetching "available" properties, can | ||
1860 | * claim physical memory. So, along with initializing the valid | ||
1861 | * address bitmap, what we do here is refetch the physical available | ||
1862 | * memory list again, and make sure it provides at least as much | ||
1863 | * memory as 'pavail' does. | ||
1864 | */ | ||
1865 | static void __init setup_valid_addr_bitmap_from_pavail(void) | ||
1866 | { | ||
1867 | int i; | ||
1868 | |||
1869 | read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents); | ||
1870 | |||
1871 | for (i = 0; i < pavail_ents; i++) { | ||
1872 | unsigned long old_start, old_end; | ||
1873 | |||
1874 | old_start = pavail[i].phys_addr; | ||
1875 | old_end = old_start + pavail[i].reg_size; | ||
1876 | while (old_start < old_end) { | ||
1877 | int n; | ||
1878 | |||
1879 | for (n = 0; n < pavail_rescan_ents; n++) { | ||
1880 | unsigned long new_start, new_end; | ||
1881 | |||
1882 | new_start = pavail_rescan[n].phys_addr; | ||
1883 | new_end = new_start + | ||
1884 | pavail_rescan[n].reg_size; | ||
1885 | |||
1886 | if (new_start <= old_start && | ||
1887 | new_end >= (old_start + PAGE_SIZE)) { | ||
1888 | set_bit(old_start >> 22, | ||
1889 | sparc64_valid_addr_bitmap); | ||
1890 | goto do_next_page; | ||
1891 | } | ||
1892 | } | ||
1893 | |||
1894 | prom_printf("mem_init: Lost memory in pavail\n"); | ||
1895 | prom_printf("mem_init: OLD start[%lx] size[%lx]\n", | ||
1896 | pavail[i].phys_addr, | ||
1897 | pavail[i].reg_size); | ||
1898 | prom_printf("mem_init: NEW start[%lx] size[%lx]\n", | ||
1899 | pavail_rescan[i].phys_addr, | ||
1900 | pavail_rescan[i].reg_size); | ||
1901 | prom_printf("mem_init: Cannot continue, aborting.\n"); | ||
1902 | prom_halt(); | ||
1903 | |||
1904 | do_next_page: | ||
1905 | old_start += PAGE_SIZE; | ||
1906 | } | ||
1907 | } | ||
1908 | } | ||
1909 | |||
1910 | void __init mem_init(void) | ||
1911 | { | ||
1912 | unsigned long codepages, datapages, initpages; | ||
1913 | unsigned long addr, last; | ||
1914 | int i; | ||
1915 | |||
1916 | i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6); | ||
1917 | i += 1; | ||
1918 | sparc64_valid_addr_bitmap = (unsigned long *) alloc_bootmem(i << 3); | ||
1919 | if (sparc64_valid_addr_bitmap == NULL) { | ||
1920 | prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n"); | ||
1921 | prom_halt(); | ||
1922 | } | ||
1923 | memset(sparc64_valid_addr_bitmap, 0, i << 3); | ||
1924 | |||
1925 | addr = PAGE_OFFSET + kern_base; | ||
1926 | last = PAGE_ALIGN(kern_size) + addr; | ||
1927 | while (addr < last) { | ||
1928 | set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap); | ||
1929 | addr += PAGE_SIZE; | ||
1930 | } | ||
1931 | |||
1932 | setup_valid_addr_bitmap_from_pavail(); | ||
1933 | |||
1934 | high_memory = __va(last_valid_pfn << PAGE_SHIFT); | ||
1935 | |||
1936 | #ifdef CONFIG_NEED_MULTIPLE_NODES | ||
1937 | for_each_online_node(i) { | ||
1938 | if (NODE_DATA(i)->node_spanned_pages != 0) { | ||
1939 | totalram_pages += | ||
1940 | free_all_bootmem_node(NODE_DATA(i)); | ||
1941 | } | ||
1942 | } | ||
1943 | #else | ||
1944 | totalram_pages = free_all_bootmem(); | ||
1945 | #endif | ||
1946 | |||
1947 | /* We subtract one to account for the mem_map_zero page | ||
1948 | * allocated below. | ||
1949 | */ | ||
1950 | totalram_pages -= 1; | ||
1951 | num_physpages = totalram_pages; | ||
1952 | |||
1953 | /* | ||
1954 | * Set up the zero page, mark it reserved, so that page count | ||
1955 | * is not manipulated when freeing the page from user ptes. | ||
1956 | */ | ||
1957 | mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0); | ||
1958 | if (mem_map_zero == NULL) { | ||
1959 | prom_printf("paging_init: Cannot alloc zero page.\n"); | ||
1960 | prom_halt(); | ||
1961 | } | ||
1962 | SetPageReserved(mem_map_zero); | ||
1963 | |||
1964 | codepages = (((unsigned long) _etext) - ((unsigned long) _start)); | ||
1965 | codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT; | ||
1966 | datapages = (((unsigned long) _edata) - ((unsigned long) _etext)); | ||
1967 | datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT; | ||
1968 | initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin)); | ||
1969 | initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT; | ||
1970 | |||
1971 | printk("Memory: %luk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n", | ||
1972 | nr_free_pages() << (PAGE_SHIFT-10), | ||
1973 | codepages << (PAGE_SHIFT-10), | ||
1974 | datapages << (PAGE_SHIFT-10), | ||
1975 | initpages << (PAGE_SHIFT-10), | ||
1976 | PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT)); | ||
1977 | |||
1978 | if (tlb_type == cheetah || tlb_type == cheetah_plus) | ||
1979 | cheetah_ecache_flush_init(); | ||
1980 | } | ||
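
The sizing at the top of mem_init() allocates one bit per 4MB of physical address space: last_valid_pfn >> (22 - PAGE_SHIFT) counts 4MB chunks, the further >> 6 packs 64 chunk-bits per long, the +1 rounds up, and i << 3 converts longs to bytes for alloc_bootmem(). A worked instance assuming 8K base pages (PAGE_SHIFT = 13) and 4GB of RAM:

	#include <stdio.h>

	int main(void)
	{
		unsigned long page_shift = 13;                            /* 8K pages */
		unsigned long last_valid_pfn = (4UL << 30) >> page_shift; /* 4GB */
		unsigned long nlongs = (last_valid_pfn >> ((22 - page_shift) + 6)) + 1;

		/* 4GB / 4MB = 1024 chunk bits; 1024 / 64 = 16 longs, +1 -> 17 (136 bytes). */
		printf("bitmap: %lu longs, %lu bytes\n", nlongs, nlongs << 3);
		return 0;
	}
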
1981 | |||
1982 | void free_initmem(void) | ||
1983 | { | ||
1984 | unsigned long addr, initend; | ||
1985 | int do_free = 1; | ||
1986 | |||
1987 | /* If the physical memory maps were trimmed by kernel command | ||
1988 | * line options, don't even try freeing this initmem stuff up. | ||
1989 | * The kernel image could have been in the trimmed out region | ||
1990 | * and if so the freeing below will free invalid page structs. | ||
1991 | */ | ||
1992 | if (cmdline_memory_size) | ||
1993 | do_free = 0; | ||
1994 | |||
1995 | /* | ||
1996 | * The init section is aligned to 8k in vmlinux.lds. Page align for >8k page sizes. | ||
1997 | */ | ||
1998 | addr = PAGE_ALIGN((unsigned long)(__init_begin)); | ||
1999 | initend = (unsigned long)(__init_end) & PAGE_MASK; | ||
2000 | for (; addr < initend; addr += PAGE_SIZE) { | ||
2001 | unsigned long page; | ||
2002 | struct page *p; | ||
2003 | |||
2004 | page = (addr + | ||
2005 | ((unsigned long) __va(kern_base)) - | ||
2006 | ((unsigned long) KERNBASE)); | ||
2007 | memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); | ||
2008 | |||
2009 | if (do_free) { | ||
2010 | p = virt_to_page(page); | ||
2011 | |||
2012 | ClearPageReserved(p); | ||
2013 | init_page_count(p); | ||
2014 | __free_page(p); | ||
2015 | num_physpages++; | ||
2016 | totalram_pages++; | ||
2017 | } | ||
2018 | } | ||
2019 | } | ||
2020 | |||
2021 | #ifdef CONFIG_BLK_DEV_INITRD | ||
2022 | void free_initrd_mem(unsigned long start, unsigned long end) | ||
2023 | { | ||
2024 | if (start < end) | ||
2025 | printk("Freeing initrd memory: %luk freed\n", (end - start) >> 10); | ||
2026 | for (; start < end; start += PAGE_SIZE) { | ||
2027 | struct page *p = virt_to_page(start); | ||
2028 | |||
2029 | ClearPageReserved(p); | ||
2030 | init_page_count(p); | ||
2031 | __free_page(p); | ||
2032 | num_physpages++; | ||
2033 | totalram_pages++; | ||
2034 | } | ||
2035 | } | ||
2036 | #endif | ||
2037 | |||
2038 | #define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U) | ||
2039 | #define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V) | ||
2040 | #define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U) | ||
2041 | #define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V) | ||
2042 | #define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R) | ||
2043 | #define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R) | ||
2044 | |||
2045 | pgprot_t PAGE_KERNEL __read_mostly; | ||
2046 | EXPORT_SYMBOL(PAGE_KERNEL); | ||
2047 | |||
2048 | pgprot_t PAGE_KERNEL_LOCKED __read_mostly; | ||
2049 | pgprot_t PAGE_COPY __read_mostly; | ||
2050 | |||
2051 | pgprot_t PAGE_SHARED __read_mostly; | ||
2052 | EXPORT_SYMBOL(PAGE_SHARED); | ||
2053 | |||
2054 | unsigned long pg_iobits __read_mostly; | ||
2055 | |||
2056 | unsigned long _PAGE_IE __read_mostly; | ||
2057 | EXPORT_SYMBOL(_PAGE_IE); | ||
2058 | |||
2059 | unsigned long _PAGE_E __read_mostly; | ||
2060 | EXPORT_SYMBOL(_PAGE_E); | ||
2061 | |||
2062 | unsigned long _PAGE_CACHE __read_mostly; | ||
2063 | EXPORT_SYMBOL(_PAGE_CACHE); | ||
2064 | |||
2065 | #ifdef CONFIG_SPARSEMEM_VMEMMAP | ||
2066 | unsigned long vmemmap_table[VMEMMAP_SIZE]; | ||
2067 | |||
2068 | int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) | ||
2069 | { | ||
2070 | unsigned long vstart = (unsigned long) start; | ||
2071 | unsigned long vend = (unsigned long) (start + nr); | ||
2072 | unsigned long phys_start = (vstart - VMEMMAP_BASE); | ||
2073 | unsigned long phys_end = (vend - VMEMMAP_BASE); | ||
2074 | unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK; | ||
2075 | unsigned long end = VMEMMAP_ALIGN(phys_end); | ||
2076 | unsigned long pte_base; | ||
2077 | |||
2078 | pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U | | ||
2079 | _PAGE_CP_4U | _PAGE_CV_4U | | ||
2080 | _PAGE_P_4U | _PAGE_W_4U); | ||
2081 | if (tlb_type == hypervisor) | ||
2082 | pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V | | ||
2083 | _PAGE_CP_4V | _PAGE_CV_4V | | ||
2084 | _PAGE_P_4V | _PAGE_W_4V); | ||
2085 | |||
2086 | for (; addr < end; addr += VMEMMAP_CHUNK) { | ||
2087 | unsigned long *vmem_pp = | ||
2088 | vmemmap_table + (addr >> VMEMMAP_CHUNK_SHIFT); | ||
2089 | void *block; | ||
2090 | |||
2091 | if (!(*vmem_pp & _PAGE_VALID)) { | ||
2092 | block = vmemmap_alloc_block(1UL << 22, node); | ||
2093 | if (!block) | ||
2094 | return -ENOMEM; | ||
2095 | |||
2096 | *vmem_pp = pte_base | __pa(block); | ||
2097 | |||
2098 | printk(KERN_INFO "[%p-%p] page_structs=%lu " | ||
2099 | "node=%d entry=%lu/%lu\n", start, block, nr, | ||
2100 | node, | ||
2101 | addr >> VMEMMAP_CHUNK_SHIFT, | ||
2102 | VMEMMAP_SIZE >> VMEMMAP_CHUNK_SHIFT); | ||
2103 | } | ||
2104 | } | ||
2105 | return 0; | ||
2106 | } | ||
2107 | #endif /* CONFIG_SPARSEMEM_VMEMMAP */ | ||
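
vmemmap_populate() backs the struct-page array with 4MB mappings (VMEMMAP_CHUNK is 1 << 22): the byte offset of a page structure within the vmemmap area, shifted down by VMEMMAP_CHUNK_SHIFT, selects the vmemmap_table slot whose entry is the covering 4MB TTE. A sketch of the slot arithmetic with a placeholder offset:

	#include <stdio.h>

	#define VMEMMAP_CHUNK_SHIFT 22UL  /* 4MB chunks, as above */

	int main(void)
	{
		/* Placeholder offset of a struct page inside the vmemmap area. */
		unsigned long phys_start = 0x01234567UL;
		unsigned long slot = phys_start >> VMEMMAP_CHUNK_SHIFT;

		/* 0x01234567 / 4MB = 4: the fifth 4MB chunk of page structs. */
		printf("offset %#lx -> vmemmap_table[%lu]\n", phys_start, slot);
		return 0;
	}
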
2108 | |||
2109 | static void prot_init_common(unsigned long page_none, | ||
2110 | unsigned long page_shared, | ||
2111 | unsigned long page_copy, | ||
2112 | unsigned long page_readonly, | ||
2113 | unsigned long page_exec_bit) | ||
2114 | { | ||
2115 | PAGE_COPY = __pgprot(page_copy); | ||
2116 | PAGE_SHARED = __pgprot(page_shared); | ||
2117 | |||
2118 | protection_map[0x0] = __pgprot(page_none); | ||
2119 | protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit); | ||
2120 | protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit); | ||
2121 | protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit); | ||
2122 | protection_map[0x4] = __pgprot(page_readonly); | ||
2123 | protection_map[0x5] = __pgprot(page_readonly); | ||
2124 | protection_map[0x6] = __pgprot(page_copy); | ||
2125 | protection_map[0x7] = __pgprot(page_copy); | ||
2126 | protection_map[0x8] = __pgprot(page_none); | ||
2127 | protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit); | ||
2128 | protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit); | ||
2129 | protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit); | ||
2130 | protection_map[0xc] = __pgprot(page_readonly); | ||
2131 | protection_map[0xd] = __pgprot(page_readonly); | ||
2132 | protection_map[0xe] = __pgprot(page_shared); | ||
2133 | protection_map[0xf] = __pgprot(page_shared); | ||
2134 | } | ||
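
The sixteen protection_map slots are indexed by the low four VM flag bits (read = 0x1, write = 0x2, exec = 0x4, shared = 0x8), which is why 0x2/0x3 (private writable) pick the copy-on-write protection while 0xa/0xb (shared writable) pick the truly shared one. A standalone decoder for the index, with the flag values spelled out as an assumption of the standard VM_* layout:

	#include <stdio.h>

	/* Assumed to mirror the VM_* bit layout used to index protection_map. */
	#define F_READ   0x1
	#define F_WRITE  0x2
	#define F_EXEC   0x4
	#define F_SHARED 0x8

	int main(void)
	{
		unsigned int idx;

		for (idx = 0; idx < 16; idx++)
			printf("0x%x: %c%c%c %s\n", idx,
			       idx & F_READ   ? 'r' : '-',
			       idx & F_WRITE  ? 'w' : '-',
			       idx & F_EXEC   ? 'x' : '-',
			       idx & F_SHARED ? "shared" : "private/CoW");
		return 0;
	}
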
2135 | |||
2136 | static void __init sun4u_pgprot_init(void) | ||
2137 | { | ||
2138 | unsigned long page_none, page_shared, page_copy, page_readonly; | ||
2139 | unsigned long page_exec_bit; | ||
2140 | |||
2141 | PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID | | ||
2142 | _PAGE_CACHE_4U | _PAGE_P_4U | | ||
2143 | __ACCESS_BITS_4U | __DIRTY_BITS_4U | | ||
2144 | _PAGE_EXEC_4U); | ||
2145 | PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID | | ||
2146 | _PAGE_CACHE_4U | _PAGE_P_4U | | ||
2147 | __ACCESS_BITS_4U | __DIRTY_BITS_4U | | ||
2148 | _PAGE_EXEC_4U | _PAGE_L_4U); | ||
2149 | |||
2150 | _PAGE_IE = _PAGE_IE_4U; | ||
2151 | _PAGE_E = _PAGE_E_4U; | ||
2152 | _PAGE_CACHE = _PAGE_CACHE_4U; | ||
2153 | |||
2154 | pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U | | ||
2155 | __ACCESS_BITS_4U | _PAGE_E_4U); | ||
2156 | |||
2157 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
2158 | kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4U) ^ | ||
2159 | 0xfffff80000000000UL; | ||
2160 | #else | ||
2161 | kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^ | ||
2162 | 0xfffff80000000000UL; | ||
2163 | #endif | ||
2164 | kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U | | ||
2165 | _PAGE_P_4U | _PAGE_W_4U); | ||
2166 | |||
2167 | /* XXX Should use 256MB on Panther. XXX */ | ||
2168 | kern_linear_pte_xor[1] = kern_linear_pte_xor[0]; | ||
2169 | |||
2170 | _PAGE_SZBITS = _PAGE_SZBITS_4U; | ||
2171 | _PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U | | ||
2172 | _PAGE_SZ64K_4U | _PAGE_SZ8K_4U | | ||
2173 | _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U); | ||
2174 | |||
2175 | |||
2176 | page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U; | ||
2177 | page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | | ||
2178 | __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U); | ||
2179 | page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | | ||
2180 | __ACCESS_BITS_4U | _PAGE_EXEC_4U); | ||
2181 | page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | | ||
2182 | __ACCESS_BITS_4U | _PAGE_EXEC_4U); | ||
2183 | |||
2184 | page_exec_bit = _PAGE_EXEC_4U; | ||
2185 | |||
2186 | prot_init_common(page_none, page_shared, page_copy, page_readonly, | ||
2187 | page_exec_bit); | ||
2188 | } | ||
2189 | |||
2190 | static void __init sun4v_pgprot_init(void) | ||
2191 | { | ||
2192 | unsigned long page_none, page_shared, page_copy, page_readonly; | ||
2193 | unsigned long page_exec_bit; | ||
2194 | |||
2195 | PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID | | ||
2196 | _PAGE_CACHE_4V | _PAGE_P_4V | | ||
2197 | __ACCESS_BITS_4V | __DIRTY_BITS_4V | | ||
2198 | _PAGE_EXEC_4V); | ||
2199 | PAGE_KERNEL_LOCKED = PAGE_KERNEL; | ||
2200 | |||
2201 | _PAGE_IE = _PAGE_IE_4V; | ||
2202 | _PAGE_E = _PAGE_E_4V; | ||
2203 | _PAGE_CACHE = _PAGE_CACHE_4V; | ||
2204 | |||
2205 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
2206 | kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^ | ||
2207 | 0xfffff80000000000UL; | ||
2208 | #else | ||
2209 | kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^ | ||
2210 | 0xfffff80000000000UL; | ||
2211 | #endif | ||
2212 | kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V | | ||
2213 | _PAGE_P_4V | _PAGE_W_4V); | ||
2214 | |||
2215 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
2216 | kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^ | ||
2217 | 0xfffff80000000000UL; | ||
2218 | #else | ||
2219 | kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^ | ||
2220 | 0xfffff80000000000UL; | ||
2221 | #endif | ||
2222 | kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V | | ||
2223 | _PAGE_P_4V | _PAGE_W_4V); | ||
2224 | |||
2225 | pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V | | ||
2226 | __ACCESS_BITS_4V | _PAGE_E_4V); | ||
2227 | |||
2228 | _PAGE_SZBITS = _PAGE_SZBITS_4V; | ||
2229 | _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V | | ||
2230 | _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V | | ||
2231 | _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V | | ||
2232 | _PAGE_SZ64K_4V | _PAGE_SZ8K_4V); | ||
2233 | |||
2234 | page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V; | ||
2235 | page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | | ||
2236 | __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V); | ||
2237 | page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | | ||
2238 | __ACCESS_BITS_4V | _PAGE_EXEC_4V); | ||
2239 | page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | | ||
2240 | __ACCESS_BITS_4V | _PAGE_EXEC_4V); | ||
2241 | |||
2242 | page_exec_bit = _PAGE_EXEC_4V; | ||
2243 | |||
2244 | prot_init_common(page_none, page_shared, page_copy, page_readonly, | ||
2245 | page_exec_bit); | ||
2246 | } | ||
2247 | |||
2248 | unsigned long pte_sz_bits(unsigned long sz) | ||
2249 | { | ||
2250 | if (tlb_type == hypervisor) { | ||
2251 | switch (sz) { | ||
2252 | case 8 * 1024: | ||
2253 | default: | ||
2254 | return _PAGE_SZ8K_4V; | ||
2255 | case 64 * 1024: | ||
2256 | return _PAGE_SZ64K_4V; | ||
2257 | case 512 * 1024: | ||
2258 | return _PAGE_SZ512K_4V; | ||
2259 | case 4 * 1024 * 1024: | ||
2260 | return _PAGE_SZ4MB_4V; | ||
2261 | } | ||
2262 | } else { | ||
2263 | switch (sz) { | ||
2264 | case 8 * 1024: | ||
2265 | default: | ||
2266 | return _PAGE_SZ8K_4U; | ||
2267 | case 64 * 1024: | ||
2268 | return _PAGE_SZ64K_4U; | ||
2269 | case 512 * 1024: | ||
2270 | return _PAGE_SZ512K_4U; | ||
2271 | case 4 * 1024 * 1024: | ||
2272 | return _PAGE_SZ4MB_4U; | ||
2273 | } | ||
2274 | } | ||
2275 | } | ||
2276 | |||
2277 | pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size) | ||
2278 | { | ||
2279 | pte_t pte; | ||
2280 | |||
2281 | pte_val(pte) = page | pgprot_val(pgprot_noncached(prot)); | ||
2282 | pte_val(pte) |= (((unsigned long)space) << 32); | ||
2283 | pte_val(pte) |= pte_sz_bits(page_size); | ||
2284 | |||
2285 | return pte; | ||
2286 | } | ||
2287 | |||
2288 | static unsigned long kern_large_tte(unsigned long paddr) | ||
2289 | { | ||
2290 | unsigned long val; | ||
2291 | |||
2292 | val = (_PAGE_VALID | _PAGE_SZ4MB_4U | | ||
2293 | _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U | | ||
2294 | _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U); | ||
2295 | if (tlb_type == hypervisor) | ||
2296 | val = (_PAGE_VALID | _PAGE_SZ4MB_4V | | ||
2297 | _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V | | ||
2298 | _PAGE_EXEC_4V | _PAGE_W_4V); | ||
2299 | |||
2300 | return val | paddr; | ||
2301 | } | ||
2302 | |||
2303 | /* If not locked, zap it. */ | ||
2304 | void __flush_tlb_all(void) | ||
2305 | { | ||
2306 | unsigned long pstate; | ||
2307 | int i; | ||
2308 | |||
2309 | __asm__ __volatile__("flushw\n\t" | ||
2310 | "rdpr %%pstate, %0\n\t" | ||
2311 | "wrpr %0, %1, %%pstate" | ||
2312 | : "=r" (pstate) | ||
2313 | : "i" (PSTATE_IE)); | ||
2314 | if (tlb_type == hypervisor) { | ||
2315 | sun4v_mmu_demap_all(); | ||
2316 | } else if (tlb_type == spitfire) { | ||
2317 | for (i = 0; i < 64; i++) { | ||
2318 | /* Spitfire Errata #32 workaround */ | ||
2319 | /* NOTE: Always runs on spitfire, so no | ||
2320 | * cheetah+ page size encodings. | ||
2321 | */ | ||
2322 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | ||
2323 | "flush %%g6" | ||
2324 | : /* No outputs */ | ||
2325 | : "r" (0), | ||
2326 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | ||
2327 | |||
2328 | if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) { | ||
2329 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | ||
2330 | "membar #Sync" | ||
2331 | : /* no outputs */ | ||
2332 | : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); | ||
2333 | spitfire_put_dtlb_data(i, 0x0UL); | ||
2334 | } | ||
2335 | |||
2336 | /* Spitfire Errata #32 workaround */ | ||
2337 | /* NOTE: Always runs on spitfire, so no | ||
2338 | * cheetah+ page size encodings. | ||
2339 | */ | ||
2340 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | ||
2341 | "flush %%g6" | ||
2342 | : /* No outputs */ | ||
2343 | : "r" (0), | ||
2344 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | ||
2345 | |||
2346 | if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) { | ||
2347 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | ||
2348 | "membar #Sync" | ||
2349 | : /* no outputs */ | ||
2350 | : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU)); | ||
2351 | spitfire_put_itlb_data(i, 0x0UL); | ||
2352 | } | ||
2353 | } | ||
2354 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { | ||
2355 | cheetah_flush_dtlb_all(); | ||
2356 | cheetah_flush_itlb_all(); | ||
2357 | } | ||
2358 | __asm__ __volatile__("wrpr %0, 0, %%pstate" | ||
2359 | : : "r" (pstate)); | ||
2360 | } | ||
diff --git a/arch/sparc/mm/init_64.h b/arch/sparc/mm/init_64.h new file mode 100644 index 000000000000..16063870a489 --- /dev/null +++ b/arch/sparc/mm/init_64.h | |||
@@ -0,0 +1,49 @@ | |||
1 | #ifndef _SPARC64_MM_INIT_H | ||
2 | #define _SPARC64_MM_INIT_H | ||
3 | |||
4 | /* Most of the symbols in this file are defined in init_64.c and | ||
5 | * marked non-static so that assembler code can get at them. | ||
6 | */ | ||
7 | |||
8 | #define MAX_PHYS_ADDRESS (1UL << 42UL) | ||
9 | #define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL) | ||
10 | #define KPTE_BITMAP_BYTES \ | ||
11 | ((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 8) | ||
12 | |||
13 | extern unsigned long kern_linear_pte_xor[2]; | ||
14 | extern unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)]; | ||
15 | extern unsigned int sparc64_highest_unlocked_tlb_ent; | ||
16 | extern unsigned long sparc64_kern_pri_context; | ||
17 | extern unsigned long sparc64_kern_pri_nuc_bits; | ||
18 | extern unsigned long sparc64_kern_sec_context; | ||
19 | extern void mmu_info(struct seq_file *m); | ||
20 | |||
21 | struct linux_prom_translation { | ||
22 | unsigned long virt; | ||
23 | unsigned long size; | ||
24 | unsigned long data; | ||
25 | }; | ||
26 | |||
27 | /* Exported for kernel TLB miss handling in ktlb.S */ | ||
28 | extern struct linux_prom_translation prom_trans[512]; | ||
29 | extern unsigned int prom_trans_ents; | ||
30 | |||
31 | /* Exported for SMP bootup purposes. */ | ||
32 | extern unsigned long kern_locked_tte_data; | ||
33 | |||
34 | extern void prom_world(int enter); | ||
35 | |||
36 | extern void free_initmem(void); | ||
37 | |||
38 | #ifdef CONFIG_SPARSEMEM_VMEMMAP | ||
39 | #define VMEMMAP_CHUNK_SHIFT 22 | ||
40 | #define VMEMMAP_CHUNK (1UL << VMEMMAP_CHUNK_SHIFT) | ||
41 | #define VMEMMAP_CHUNK_MASK ~(VMEMMAP_CHUNK - 1UL) | ||
42 | #define VMEMMAP_ALIGN(x) (((x)+VMEMMAP_CHUNK-1UL)&VMEMMAP_CHUNK_MASK) | ||
43 | |||
44 | #define VMEMMAP_SIZE ((((1UL << MAX_PHYSADDR_BITS) >> PAGE_SHIFT) * \ | ||
45 | sizeof(struct page *)) >> VMEMMAP_CHUNK_SHIFT) | ||
46 | extern unsigned long vmemmap_table[VMEMMAP_SIZE]; | ||
47 | #endif | ||
48 | |||
49 | #endif /* _SPARC64_MM_INIT_H */ | ||
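As a sanity check on the sizing macros above: MAX_PHYS_ADDRESS of 2^42 bytes split into 256MB chunks gives 2^14 = 16384 chunks, one bitmap bit each, so KPTE_BITMAP_BYTES works out to 2048 and kpte_linear_bitmap occupies 256 unsigned longs; each bit records, roughly, which kern_linear_pte_xor entry (4MB or 256MB mapping) a chunk should use. A throwaway sketch of the arithmetic:

#include <stdio.h>

int main(void)
{
        unsigned long max_phys = 1UL << 42;            /* MAX_PHYS_ADDRESS     */
        unsigned long chunk    = 256UL * 1024 * 1024;  /* KPTE_BITMAP_CHUNK_SZ */
        unsigned long bits     = max_phys / chunk;     /* one bit per chunk    */

        printf("%lu bits -> %lu bytes -> %lu longs\n",
               bits, bits / 8, bits / 8 / sizeof(unsigned long));
        /* prints: 16384 bits -> 2048 bytes -> 256 longs */
        return 0;
}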
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c new file mode 100644 index 000000000000..d8f21e24a82f --- /dev/null +++ b/arch/sparc/mm/tlb.c | |||
@@ -0,0 +1,97 @@ | |||
1 | /* arch/sparc64/mm/tlb.c | ||
2 | * | ||
3 | * Copyright (C) 2004 David S. Miller <davem@redhat.com> | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/init.h> | ||
8 | #include <linux/percpu.h> | ||
9 | #include <linux/mm.h> | ||
10 | #include <linux/swap.h> | ||
11 | #include <linux/preempt.h> | ||
12 | |||
13 | #include <asm/pgtable.h> | ||
14 | #include <asm/pgalloc.h> | ||
15 | #include <asm/tlbflush.h> | ||
16 | #include <asm/cacheflush.h> | ||
17 | #include <asm/mmu_context.h> | ||
18 | #include <asm/tlb.h> | ||
19 | |||
20 | /* Heavily inspired by the ppc64 code. */ | ||
21 | |||
22 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); | ||
23 | |||
24 | void flush_tlb_pending(void) | ||
25 | { | ||
26 | struct mmu_gather *mp = &get_cpu_var(mmu_gathers); | ||
27 | |||
28 | if (mp->tlb_nr) { | ||
29 | flush_tsb_user(mp); | ||
30 | |||
31 | if (CTX_VALID(mp->mm->context)) { | ||
32 | #ifdef CONFIG_SMP | ||
33 | smp_flush_tlb_pending(mp->mm, mp->tlb_nr, | ||
34 | &mp->vaddrs[0]); | ||
35 | #else | ||
36 | __flush_tlb_pending(CTX_HWBITS(mp->mm->context), | ||
37 | mp->tlb_nr, &mp->vaddrs[0]); | ||
38 | #endif | ||
39 | } | ||
40 | mp->tlb_nr = 0; | ||
41 | } | ||
42 | |||
43 | put_cpu_var(mmu_gathers); | ||
44 | } | ||
45 | |||
46 | void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig) | ||
47 | { | ||
48 | struct mmu_gather *mp = &__get_cpu_var(mmu_gathers); | ||
49 | unsigned long nr; | ||
50 | |||
51 | vaddr &= PAGE_MASK; | ||
52 | if (pte_exec(orig)) | ||
53 | vaddr |= 0x1UL; | ||
54 | |||
55 | if (tlb_type != hypervisor && | ||
56 | pte_dirty(orig)) { | ||
57 | unsigned long paddr, pfn = pte_pfn(orig); | ||
58 | struct address_space *mapping; | ||
59 | struct page *page; | ||
60 | |||
61 | if (!pfn_valid(pfn)) | ||
62 | goto no_cache_flush; | ||
63 | |||
64 | page = pfn_to_page(pfn); | ||
65 | if (PageReserved(page)) | ||
66 | goto no_cache_flush; | ||
67 | |||
68 | /* A real file page? */ | ||
69 | mapping = page_mapping(page); | ||
70 | if (!mapping) | ||
71 | goto no_cache_flush; | ||
72 | |||
73 | paddr = (unsigned long) page_address(page); | ||
74 | if ((paddr ^ vaddr) & (1 << 13)) | ||
75 | flush_dcache_page_all(mm, page); | ||
76 | } | ||
77 | |||
78 | no_cache_flush: | ||
79 | |||
80 | if (mp->fullmm) | ||
81 | return; | ||
82 | |||
83 | nr = mp->tlb_nr; | ||
84 | |||
85 | if (unlikely(nr != 0 && mm != mp->mm)) { | ||
86 | flush_tlb_pending(); | ||
87 | nr = 0; | ||
88 | } | ||
89 | |||
90 | if (nr == 0) | ||
91 | mp->mm = mm; | ||
92 | |||
93 | mp->vaddrs[nr] = vaddr; | ||
94 | mp->tlb_nr = ++nr; | ||
95 | if (nr >= TLB_BATCH_NR) | ||
96 | flush_tlb_pending(); | ||
97 | } | ||
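The (paddr ^ vaddr) & (1 << 13) test in tlb_batch_add() encodes the D-cache geometry: with 8K pages and a 16K virtually-indexed, direct-mapped D-cache, address bit 13 selects the cache half, so a kernel and a user mapping of the same page can only alias when they disagree in that bit. A standalone sketch of the check:

#include <stdio.h>

static int dcache_alias(unsigned long kaddr, unsigned long uaddr)
{
        /* bit 13 is the only index bit not forced by the page offset */
        return ((kaddr ^ uaddr) & (1UL << 13)) != 0;
}

int main(void)
{
        printf("%d\n", dcache_alias(0x2000, 0x4000));  /* 1: colours differ */
        printf("%d\n", dcache_alias(0x2000, 0x6000));  /* 0: same colour    */
        return 0;
}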
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c new file mode 100644 index 000000000000..f0282fad632a --- /dev/null +++ b/arch/sparc/mm/tsb.c | |||
@@ -0,0 +1,496 @@ | |||
1 | /* arch/sparc64/mm/tsb.c | ||
2 | * | ||
3 | * Copyright (C) 2006, 2008 David S. Miller <davem@davemloft.net> | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/preempt.h> | ||
8 | #include <asm/system.h> | ||
9 | #include <asm/page.h> | ||
10 | #include <asm/tlbflush.h> | ||
11 | #include <asm/tlb.h> | ||
12 | #include <asm/mmu_context.h> | ||
13 | #include <asm/pgtable.h> | ||
14 | #include <asm/tsb.h> | ||
15 | #include <asm/oplib.h> | ||
16 | |||
17 | extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; | ||
18 | |||
19 | static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift, unsigned long nentries) | ||
20 | { | ||
21 | vaddr >>= hash_shift; | ||
22 | return vaddr & (nentries - 1); | ||
23 | } | ||
24 | |||
25 | static inline int tag_compare(unsigned long tag, unsigned long vaddr) | ||
26 | { | ||
27 | return (tag == (vaddr >> 22)); | ||
28 | } | ||
29 | |||
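tsb_hash() and tag_compare() split a virtual address into a direct-mapped index and a tag: index = (vaddr >> hash_shift) & (nentries - 1), tag = vaddr >> 22. A small sketch for the base TSB, where hash_shift is PAGE_SHIFT (13) and an 8KB TSB of 16-byte entries holds 512 of them; the address is illustrative:

#include <stdio.h>

int main(void)
{
        unsigned long vaddr    = 0x123456000UL;
        unsigned long nentries = 512;          /* 8KB TSB / 16B per entry */

        unsigned long idx = (vaddr >> 13) & (nentries - 1);
        unsigned long tag = vaddr >> 22;

        /* the miss handler probes entry idx and compares its tag field */
        printf("idx=%lu tag=0x%lx\n", idx, tag);  /* idx=43 tag=0x48d */
        return 0;
}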
30 | /* TSB flushes need only occur on the processor initiating the address | ||
31 | * space modification, not on each cpu the address space has run on. | ||
32 | * Only the TLB flush needs that treatment. | ||
33 | */ | ||
34 | |||
35 | void flush_tsb_kernel_range(unsigned long start, unsigned long end) | ||
36 | { | ||
37 | unsigned long v; | ||
38 | |||
39 | for (v = start; v < end; v += PAGE_SIZE) { | ||
40 | unsigned long hash = tsb_hash(v, PAGE_SHIFT, | ||
41 | KERNEL_TSB_NENTRIES); | ||
42 | struct tsb *ent = &swapper_tsb[hash]; | ||
43 | |||
44 | if (tag_compare(ent->tag, v)) | ||
45 | ent->tag = (1UL << TSB_TAG_INVALID_BIT); | ||
46 | } | ||
47 | } | ||
48 | |||
49 | static void __flush_tsb_one(struct mmu_gather *mp, unsigned long hash_shift, unsigned long tsb, unsigned long nentries) | ||
50 | { | ||
51 | unsigned long i; | ||
52 | |||
53 | for (i = 0; i < mp->tlb_nr; i++) { | ||
54 | unsigned long v = mp->vaddrs[i]; | ||
55 | unsigned long tag, ent, hash; | ||
56 | |||
57 | v &= ~0x1UL; | ||
58 | |||
59 | hash = tsb_hash(v, hash_shift, nentries); | ||
60 | ent = tsb + (hash * sizeof(struct tsb)); | ||
61 | tag = (v >> 22UL); | ||
62 | |||
63 | tsb_flush(ent, tag); | ||
64 | } | ||
65 | } | ||
66 | |||
67 | void flush_tsb_user(struct mmu_gather *mp) | ||
68 | { | ||
69 | struct mm_struct *mm = mp->mm; | ||
70 | unsigned long nentries, base, flags; | ||
71 | |||
72 | spin_lock_irqsave(&mm->context.lock, flags); | ||
73 | |||
74 | base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; | ||
75 | nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; | ||
76 | if (tlb_type == cheetah_plus || tlb_type == hypervisor) | ||
77 | base = __pa(base); | ||
78 | __flush_tsb_one(mp, PAGE_SHIFT, base, nentries); | ||
79 | |||
80 | #ifdef CONFIG_HUGETLB_PAGE | ||
81 | if (mm->context.tsb_block[MM_TSB_HUGE].tsb) { | ||
82 | base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb; | ||
83 | nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; | ||
84 | if (tlb_type == cheetah_plus || tlb_type == hypervisor) | ||
85 | base = __pa(base); | ||
86 | __flush_tsb_one(mp, HPAGE_SHIFT, base, nentries); | ||
87 | } | ||
88 | #endif | ||
89 | spin_unlock_irqrestore(&mm->context.lock, flags); | ||
90 | } | ||
91 | |||
92 | #if defined(CONFIG_SPARC64_PAGE_SIZE_8KB) | ||
93 | #define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_8K | ||
94 | #define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_8K | ||
95 | #elif defined(CONFIG_SPARC64_PAGE_SIZE_64KB) | ||
96 | #define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_64K | ||
97 | #define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_64K | ||
98 | #else | ||
99 | #error Broken base page size setting... | ||
100 | #endif | ||
101 | |||
102 | #ifdef CONFIG_HUGETLB_PAGE | ||
103 | #if defined(CONFIG_HUGETLB_PAGE_SIZE_64K) | ||
104 | #define HV_PGSZ_IDX_HUGE HV_PGSZ_IDX_64K | ||
105 | #define HV_PGSZ_MASK_HUGE HV_PGSZ_MASK_64K | ||
106 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K) | ||
107 | #define HV_PGSZ_IDX_HUGE HV_PGSZ_IDX_512K | ||
108 | #define HV_PGSZ_MASK_HUGE HV_PGSZ_MASK_512K | ||
109 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB) | ||
110 | #define HV_PGSZ_IDX_HUGE HV_PGSZ_IDX_4MB | ||
111 | #define HV_PGSZ_MASK_HUGE HV_PGSZ_MASK_4MB | ||
112 | #else | ||
113 | #error Broken huge page size setting... | ||
114 | #endif | ||
115 | #endif | ||
116 | |||
117 | static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes) | ||
118 | { | ||
119 | unsigned long tsb_reg, base, tsb_paddr; | ||
120 | unsigned long page_sz, tte; | ||
121 | |||
122 | mm->context.tsb_block[tsb_idx].tsb_nentries = | ||
123 | tsb_bytes / sizeof(struct tsb); | ||
124 | |||
125 | base = TSBMAP_BASE; | ||
126 | tte = pgprot_val(PAGE_KERNEL_LOCKED); | ||
127 | tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb); | ||
128 | BUG_ON(tsb_paddr & (tsb_bytes - 1UL)); | ||
129 | |||
130 | /* Use the smallest page size that can map the whole TSB | ||
131 | * in one TLB entry. | ||
132 | */ | ||
133 | switch (tsb_bytes) { | ||
134 | case 8192 << 0: | ||
135 | tsb_reg = 0x0UL; | ||
136 | #ifdef DCACHE_ALIASING_POSSIBLE | ||
137 | base += (tsb_paddr & 8192); | ||
138 | #endif | ||
139 | page_sz = 8192; | ||
140 | break; | ||
141 | |||
142 | case 8192 << 1: | ||
143 | tsb_reg = 0x1UL; | ||
144 | page_sz = 64 * 1024; | ||
145 | break; | ||
146 | |||
147 | case 8192 << 2: | ||
148 | tsb_reg = 0x2UL; | ||
149 | page_sz = 64 * 1024; | ||
150 | break; | ||
151 | |||
152 | case 8192 << 3: | ||
153 | tsb_reg = 0x3UL; | ||
154 | page_sz = 64 * 1024; | ||
155 | break; | ||
156 | |||
157 | case 8192 << 4: | ||
158 | tsb_reg = 0x4UL; | ||
159 | page_sz = 512 * 1024; | ||
160 | break; | ||
161 | |||
162 | case 8192 << 5: | ||
163 | tsb_reg = 0x5UL; | ||
164 | page_sz = 512 * 1024; | ||
165 | break; | ||
166 | |||
167 | case 8192 << 6: | ||
168 | tsb_reg = 0x6UL; | ||
169 | page_sz = 512 * 1024; | ||
170 | break; | ||
171 | |||
172 | case 8192 << 7: | ||
173 | tsb_reg = 0x7UL; | ||
174 | page_sz = 4 * 1024 * 1024; | ||
175 | break; | ||
176 | |||
177 | default: | ||
178 | printk(KERN_ERR "TSB[%s:%d]: Impossible TSB size %lu, killing process.\n", | ||
179 | current->comm, current->pid, tsb_bytes); | ||
180 | do_exit(SIGSEGV); | ||
181 | } | ||
182 | tte |= pte_sz_bits(page_sz); | ||
183 | |||
184 | if (tlb_type == cheetah_plus || tlb_type == hypervisor) { | ||
185 | /* Physical mapping, no locked TLB entry for TSB. */ | ||
186 | tsb_reg |= tsb_paddr; | ||
187 | |||
188 | mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg; | ||
189 | mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0; | ||
190 | mm->context.tsb_block[tsb_idx].tsb_map_pte = 0; | ||
191 | } else { | ||
192 | tsb_reg |= base; | ||
193 | tsb_reg |= (tsb_paddr & (page_sz - 1UL)); | ||
194 | tte |= (tsb_paddr & ~(page_sz - 1UL)); | ||
195 | |||
196 | mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg; | ||
197 | mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base; | ||
198 | mm->context.tsb_block[tsb_idx].tsb_map_pte = tte; | ||
199 | } | ||
200 | |||
201 | /* Setup the Hypervisor TSB descriptor. */ | ||
202 | if (tlb_type == hypervisor) { | ||
203 | struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx]; | ||
204 | |||
205 | switch (tsb_idx) { | ||
206 | case MM_TSB_BASE: | ||
207 | hp->pgsz_idx = HV_PGSZ_IDX_BASE; | ||
208 | break; | ||
209 | #ifdef CONFIG_HUGETLB_PAGE | ||
210 | case MM_TSB_HUGE: | ||
211 | hp->pgsz_idx = HV_PGSZ_IDX_HUGE; | ||
212 | break; | ||
213 | #endif | ||
214 | default: | ||
215 | BUG(); | ||
216 | } | ||
217 | hp->assoc = 1; | ||
218 | hp->num_ttes = tsb_bytes / 16; | ||
219 | hp->ctx_idx = 0; | ||
220 | switch (tsb_idx) { | ||
221 | case MM_TSB_BASE: | ||
222 | hp->pgsz_mask = HV_PGSZ_MASK_BASE; | ||
223 | break; | ||
224 | #ifdef CONFIG_HUGETLB_PAGE | ||
225 | case MM_TSB_HUGE: | ||
226 | hp->pgsz_mask = HV_PGSZ_MASK_HUGE; | ||
227 | break; | ||
228 | #endif | ||
229 | default: | ||
230 | BUG(); | ||
231 | } | ||
232 | hp->tsb_base = tsb_paddr; | ||
233 | hp->resv = 0; | ||
234 | } | ||
235 | } | ||
236 | |||
237 | static struct kmem_cache *tsb_caches[8] __read_mostly; | ||
238 | |||
239 | static const char *tsb_cache_names[8] = { | ||
240 | "tsb_8KB", | ||
241 | "tsb_16KB", | ||
242 | "tsb_32KB", | ||
243 | "tsb_64KB", | ||
244 | "tsb_128KB", | ||
245 | "tsb_256KB", | ||
246 | "tsb_512KB", | ||
247 | "tsb_1MB", | ||
248 | }; | ||
249 | |||
250 | void __init pgtable_cache_init(void) | ||
251 | { | ||
252 | unsigned long i; | ||
253 | |||
254 | for (i = 0; i < 8; i++) { | ||
255 | unsigned long size = 8192 << i; | ||
256 | const char *name = tsb_cache_names[i]; | ||
257 | |||
258 | tsb_caches[i] = kmem_cache_create(name, | ||
259 | size, size, | ||
260 | 0, NULL); | ||
261 | if (!tsb_caches[i]) { | ||
262 | prom_printf("Could not create %s cache\n", name); | ||
263 | prom_halt(); | ||
264 | } | ||
265 | } | ||
266 | } | ||
267 | |||
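The eight caches correspond one-to-one with the eight legal TSB sizes: cache i backs TSBs of 8192 << i bytes, and because the low three bits of tsb_reg_val hold the same index, tsb_destroy_one() below can recover the owning cache from the register value alone. A trivial sketch of the correspondence:

#include <stdio.h>

int main(void)
{
        unsigned long i;

        for (i = 0; i < 8; i++)
                printf("cache %lu: %4lu KB\n", i, (8192UL << i) / 1024);
        return 0;
}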
268 | /* When the RSS of an address space exceeds tsb_rss_limit for a TSB, | ||
269 | * do_sparc64_fault() invokes this routine to try to grow it. | ||
270 | * | ||
271 | * When we reach the maximum TSB size supported, we stick ~0UL into | ||
272 | * tsb_rss_limit for that TSB so the grow checks in do_sparc64_fault() | ||
273 | * will not trigger any longer. | ||
274 | * | ||
275 | * The TSB can be anywhere from 8K to 1MB in size, in increasing powers | ||
276 | * of two. The TSB must be aligned to its size, so e.g. a 512K TSB | ||
277 | * must be 512K aligned. It also must be physically contiguous, so we | ||
278 | * cannot use vmalloc(). | ||
279 | * | ||
280 | * The idea here is to grow the TSB when the RSS of the process approaches | ||
281 | * the number of entries that the current TSB can hold at once. Currently, | ||
282 | * we trigger when the RSS hits 3/4 of the TSB capacity. | ||
283 | */ | ||
284 | void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss) | ||
285 | { | ||
286 | unsigned long max_tsb_size = 1 * 1024 * 1024; | ||
287 | unsigned long new_size, old_size, flags; | ||
288 | struct tsb *old_tsb, *new_tsb; | ||
289 | unsigned long new_cache_index, old_cache_index; | ||
290 | unsigned long new_rss_limit; | ||
291 | gfp_t gfp_flags; | ||
292 | |||
293 | if (max_tsb_size > (PAGE_SIZE << MAX_ORDER)) | ||
294 | max_tsb_size = (PAGE_SIZE << MAX_ORDER); | ||
295 | |||
296 | new_cache_index = 0; | ||
297 | for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) { | ||
298 | unsigned long n_entries = new_size / sizeof(struct tsb); | ||
299 | |||
300 | n_entries = (n_entries * 3) / 4; | ||
301 | if (n_entries > rss) | ||
302 | break; | ||
303 | |||
304 | new_cache_index++; | ||
305 | } | ||
306 | |||
307 | if (new_size == max_tsb_size) | ||
308 | new_rss_limit = ~0UL; | ||
309 | else | ||
310 | new_rss_limit = ((new_size / sizeof(struct tsb)) * 3) / 4; | ||
311 | |||
312 | retry_tsb_alloc: | ||
313 | gfp_flags = GFP_KERNEL; | ||
314 | if (new_size > (PAGE_SIZE * 2)) | ||
315 | gfp_flags = __GFP_NOWARN | __GFP_NORETRY; | ||
316 | |||
317 | new_tsb = kmem_cache_alloc_node(tsb_caches[new_cache_index], | ||
318 | gfp_flags, numa_node_id()); | ||
319 | if (unlikely(!new_tsb)) { | ||
320 | /* Not being able to fork due to a high-order TSB | ||
321 | * allocation failure is very bad behavior. Just back | ||
322 | * down to a 0-order allocation and force no TSB | ||
323 | * growing for this address space. | ||
324 | */ | ||
325 | if (mm->context.tsb_block[tsb_index].tsb == NULL && | ||
326 | new_cache_index > 0) { | ||
327 | new_cache_index = 0; | ||
328 | new_size = 8192; | ||
329 | new_rss_limit = ~0UL; | ||
330 | goto retry_tsb_alloc; | ||
331 | } | ||
332 | |||
333 | /* If we failed on a TSB grow, we are under serious | ||
334 | * memory pressure so don't try to grow any more. | ||
335 | */ | ||
336 | if (mm->context.tsb_block[tsb_index].tsb != NULL) | ||
337 | mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL; | ||
338 | return; | ||
339 | } | ||
340 | |||
341 | /* Mark all tags as invalid. */ | ||
342 | tsb_init(new_tsb, new_size); | ||
343 | |||
344 | /* Ok, we are about to commit the changes. If we are | ||
345 | * growing an existing TSB the locking is very tricky, | ||
346 | * so WATCH OUT! | ||
347 | * | ||
348 | * We have to hold mm->context.lock while committing to the | ||
349 | * new TSB, this synchronizes us with processors in | ||
350 | * flush_tsb_user() and switch_mm() for this address space. | ||
351 | * | ||
352 | * But even with that lock held, processors run asynchronously | ||
353 | * accessing the old TSB via TLB miss handling. This is OK | ||
354 | * because those actions are just propagating state from the | ||
355 | * Linux page tables into the TSB, page table mappings are not | ||
356 | * being changed. If a real fault occurs, the processor will | ||
357 | * synchronize with us when it hits flush_tsb_user(), this is | ||
358 | * also true for the case where vmscan is modifying the page | ||
359 | * tables. The only thing we need to be careful with is to | ||
360 | * skip any locked TSB entries during copy_tsb(). | ||
361 | * | ||
362 | * When we finish committing to the new TSB, we have to drop | ||
363 | * the lock and ask all other cpus running this address space | ||
364 | * to run tsb_context_switch() to see the new TSB table. | ||
365 | */ | ||
366 | spin_lock_irqsave(&mm->context.lock, flags); | ||
367 | |||
368 | old_tsb = mm->context.tsb_block[tsb_index].tsb; | ||
369 | old_cache_index = | ||
370 | (mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL); | ||
371 | old_size = (mm->context.tsb_block[tsb_index].tsb_nentries * | ||
372 | sizeof(struct tsb)); | ||
373 | |||
374 | |||
375 | /* Handle multiple threads trying to grow the TSB at the same time. | ||
376 | * One will get in here first, and bump the size and the RSS limit. | ||
377 | * The others will get in here next and hit this check. | ||
378 | */ | ||
379 | if (unlikely(old_tsb && | ||
380 | (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) { | ||
381 | spin_unlock_irqrestore(&mm->context.lock, flags); | ||
382 | |||
383 | kmem_cache_free(tsb_caches[new_cache_index], new_tsb); | ||
384 | return; | ||
385 | } | ||
386 | |||
387 | mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit; | ||
388 | |||
389 | if (old_tsb) { | ||
390 | extern void copy_tsb(unsigned long old_tsb_base, | ||
391 | unsigned long old_tsb_size, | ||
392 | unsigned long new_tsb_base, | ||
393 | unsigned long new_tsb_size); | ||
394 | unsigned long old_tsb_base = (unsigned long) old_tsb; | ||
395 | unsigned long new_tsb_base = (unsigned long) new_tsb; | ||
396 | |||
397 | if (tlb_type == cheetah_plus || tlb_type == hypervisor) { | ||
398 | old_tsb_base = __pa(old_tsb_base); | ||
399 | new_tsb_base = __pa(new_tsb_base); | ||
400 | } | ||
401 | copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size); | ||
402 | } | ||
403 | |||
404 | mm->context.tsb_block[tsb_index].tsb = new_tsb; | ||
405 | setup_tsb_params(mm, tsb_index, new_size); | ||
406 | |||
407 | spin_unlock_irqrestore(&mm->context.lock, flags); | ||
408 | |||
409 | /* If old_tsb is NULL, we're being invoked for the first time | ||
410 | * from init_new_context(). | ||
411 | */ | ||
412 | if (old_tsb) { | ||
413 | /* Reload it on the local cpu. */ | ||
414 | tsb_context_switch(mm); | ||
415 | |||
416 | /* Now force other processors to do the same. */ | ||
417 | preempt_disable(); | ||
418 | smp_tsb_sync(mm); | ||
419 | preempt_enable(); | ||
420 | |||
421 | /* Now it is safe to free the old tsb. */ | ||
422 | kmem_cache_free(tsb_caches[old_cache_index], old_tsb); | ||
423 | } | ||
424 | } | ||
425 | |||
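The sizing loop at the top of tsb_grow() picks the smallest power-of-two TSB whose three-quarters capacity still exceeds the current RSS, with entries 16 bytes each. A standalone sketch of just that policy, using a fixed 1MB cap and ignoring the MAX_ORDER clamp:

#include <stdio.h>

static unsigned long pick_tsb_size(unsigned long rss)
{
        unsigned long size;

        for (size = 8192; size < 1024 * 1024; size <<= 1) {
                unsigned long limit = (size / 16) * 3 / 4;

                if (limit > rss)
                        break;
        }
        return size;
}

int main(void)
{
        printf("%lu\n", pick_tsb_size(100));    /* 8192: 3/4 of 512 entries */
        printf("%lu\n", pick_tsb_size(5000));   /* 131072                   */
        return 0;
}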
426 | int init_new_context(struct task_struct *tsk, struct mm_struct *mm) | ||
427 | { | ||
428 | #ifdef CONFIG_HUGETLB_PAGE | ||
429 | unsigned long huge_pte_count; | ||
430 | #endif | ||
431 | unsigned int i; | ||
432 | |||
433 | spin_lock_init(&mm->context.lock); | ||
434 | |||
435 | mm->context.sparc64_ctx_val = 0UL; | ||
436 | |||
437 | #ifdef CONFIG_HUGETLB_PAGE | ||
438 | /* We reset it to zero because the fork() page copying | ||
439 | * will re-increment the counters as the parent PTEs are | ||
440 | * copied into the child address space. | ||
441 | */ | ||
442 | huge_pte_count = mm->context.huge_pte_count; | ||
443 | mm->context.huge_pte_count = 0; | ||
444 | #endif | ||
445 | |||
446 | /* copy_mm() copies over the parent's mm_struct before calling | ||
447 | * us, so we need to zero out the TSB pointer or else tsb_grow() | ||
448 | * will be confused and think there is an older TSB to free up. | ||
449 | */ | ||
450 | for (i = 0; i < MM_NUM_TSBS; i++) | ||
451 | mm->context.tsb_block[i].tsb = NULL; | ||
452 | |||
453 | /* If this is a fork, inherit the parent's TSB size. We would | ||
454 | * grow it to that size on the first page fault anyway. | ||
455 | */ | ||
456 | tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm)); | ||
457 | |||
458 | #ifdef CONFIG_HUGETLB_PAGE | ||
459 | if (unlikely(huge_pte_count)) | ||
460 | tsb_grow(mm, MM_TSB_HUGE, huge_pte_count); | ||
461 | #endif | ||
462 | |||
463 | if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb)) | ||
464 | return -ENOMEM; | ||
465 | |||
466 | return 0; | ||
467 | } | ||
468 | |||
469 | static void tsb_destroy_one(struct tsb_config *tp) | ||
470 | { | ||
471 | unsigned long cache_index; | ||
472 | |||
473 | if (!tp->tsb) | ||
474 | return; | ||
475 | cache_index = tp->tsb_reg_val & 0x7UL; | ||
476 | kmem_cache_free(tsb_caches[cache_index], tp->tsb); | ||
477 | tp->tsb = NULL; | ||
478 | tp->tsb_reg_val = 0UL; | ||
479 | } | ||
480 | |||
481 | void destroy_context(struct mm_struct *mm) | ||
482 | { | ||
483 | unsigned long flags, i; | ||
484 | |||
485 | for (i = 0; i < MM_NUM_TSBS; i++) | ||
486 | tsb_destroy_one(&mm->context.tsb_block[i]); | ||
487 | |||
488 | spin_lock_irqsave(&ctx_alloc_lock, flags); | ||
489 | |||
490 | if (CTX_VALID(mm->context)) { | ||
491 | unsigned long nr = CTX_NRBITS(mm->context); | ||
492 | mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63)); | ||
493 | } | ||
494 | |||
495 | spin_unlock_irqrestore(&ctx_alloc_lock, flags); | ||
496 | } | ||
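destroy_context() releases the context number by clearing a single bit in mmu_context_bmap: context nr lives in 64-bit word nr >> 6, at bit position nr & 63. A toy sketch of the indexing:

#include <stdio.h>

int main(void)
{
        unsigned long bmap[4] = { 0 };
        unsigned long nr = 70;

        bmap[nr >> 6] |= 1UL << (nr & 63);      /* allocate context 70   */
        printf("word %lu, bit %lu\n", nr >> 6, nr & 63);  /* word 1, bit 6 */
        bmap[nr >> 6] &= ~(1UL << (nr & 63));   /* and free it again     */
        return 0;
}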
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S new file mode 100644 index 000000000000..e4c146f7c7e9 --- /dev/null +++ b/arch/sparc/mm/ultra.S | |||
@@ -0,0 +1,767 @@ | |||
1 | /* | ||
2 | * ultra.S: Don't expand these all over the place... | ||
3 | * | ||
4 | * Copyright (C) 1997, 2000, 2008 David S. Miller (davem@davemloft.net) | ||
5 | */ | ||
6 | |||
7 | #include <asm/asi.h> | ||
8 | #include <asm/pgtable.h> | ||
9 | #include <asm/page.h> | ||
10 | #include <asm/spitfire.h> | ||
11 | #include <asm/mmu_context.h> | ||
12 | #include <asm/mmu.h> | ||
13 | #include <asm/pil.h> | ||
14 | #include <asm/head.h> | ||
15 | #include <asm/thread_info.h> | ||
16 | #include <asm/cacheflush.h> | ||
17 | #include <asm/hypervisor.h> | ||
18 | #include <asm/cpudata.h> | ||
19 | |||
20 | /* Basically, most of the Spitfire vs. Cheetah madness | ||
21 | * has to do with the fact that Cheetah does not support | ||
22 | * IMMU flushes out of the secondary context. Someone needs | ||
23 | * to throw a south lake birthday party for the folks | ||
24 | * in Microelectronics who refused to fix this shit. | ||
25 | */ | ||
26 | |||
27 | /* This file is meant to be read efficiently by the CPU, not humans. | ||
28 | * Try not to fuck this up for anybody... | ||
29 | */ | ||
30 | .text | ||
31 | .align 32 | ||
32 | .globl __flush_tlb_mm | ||
33 | __flush_tlb_mm: /* 18 insns */ | ||
34 | /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */ | ||
35 | ldxa [%o1] ASI_DMMU, %g2 | ||
36 | cmp %g2, %o0 | ||
37 | bne,pn %icc, __spitfire_flush_tlb_mm_slow | ||
38 | mov 0x50, %g3 | ||
39 | stxa %g0, [%g3] ASI_DMMU_DEMAP | ||
40 | stxa %g0, [%g3] ASI_IMMU_DEMAP | ||
41 | sethi %hi(KERNBASE), %g3 | ||
42 | flush %g3 | ||
43 | retl | ||
44 | nop | ||
45 | nop | ||
46 | nop | ||
47 | nop | ||
48 | nop | ||
49 | nop | ||
50 | nop | ||
51 | nop | ||
52 | nop | ||
53 | nop | ||
54 | |||
55 | .align 32 | ||
56 | .globl __flush_tlb_pending | ||
57 | __flush_tlb_pending: /* 26 insns */ | ||
58 | /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ | ||
59 | rdpr %pstate, %g7 | ||
60 | sllx %o1, 3, %o1 | ||
61 | andn %g7, PSTATE_IE, %g2 | ||
62 | wrpr %g2, %pstate | ||
63 | mov SECONDARY_CONTEXT, %o4 | ||
64 | ldxa [%o4] ASI_DMMU, %g2 | ||
65 | stxa %o0, [%o4] ASI_DMMU | ||
66 | 1: sub %o1, (1 << 3), %o1 | ||
67 | ldx [%o2 + %o1], %o3 | ||
68 | andcc %o3, 1, %g0 | ||
69 | andn %o3, 1, %o3 | ||
70 | be,pn %icc, 2f | ||
71 | or %o3, 0x10, %o3 | ||
72 | stxa %g0, [%o3] ASI_IMMU_DEMAP | ||
73 | 2: stxa %g0, [%o3] ASI_DMMU_DEMAP | ||
74 | membar #Sync | ||
75 | brnz,pt %o1, 1b | ||
76 | nop | ||
77 | stxa %g2, [%o4] ASI_DMMU | ||
78 | sethi %hi(KERNBASE), %o4 | ||
79 | flush %o4 | ||
80 | retl | ||
81 | wrpr %g7, 0x0, %pstate | ||
82 | nop | ||
83 | nop | ||
84 | nop | ||
85 | nop | ||
86 | |||
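The vaddrs[] entries walked above carry one bit of side information: tlb_batch_add() in tlb.c stores page-aligned addresses with bit 0 set when the old mapping was executable, which is what the andcc/be,pn pair tests to decide whether the I-TLB needs a demap as well. A sketch of the encode/decode:

#include <stdio.h>

int main(void)
{
        /* encode side (tlb_batch_add): page address plus exec bit */
        unsigned long ent = 0x123456000UL | 0x1UL;

        /* decode side (__flush_tlb_pending) */
        unsigned long vaddr      = ent & ~0x1UL;
        int           demap_itlb = (ent & 0x1UL) != 0;

        printf("vaddr=0x%lx itlb=%d\n", vaddr, demap_itlb);
        return 0;
}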
87 | .align 32 | ||
88 | .globl __flush_tlb_kernel_range | ||
89 | __flush_tlb_kernel_range: /* 16 insns */ | ||
90 | /* %o0=start, %o1=end */ | ||
91 | cmp %o0, %o1 | ||
92 | be,pn %xcc, 2f | ||
93 | sethi %hi(PAGE_SIZE), %o4 | ||
94 | sub %o1, %o0, %o3 | ||
95 | sub %o3, %o4, %o3 | ||
96 | or %o0, 0x20, %o0 ! Nucleus | ||
97 | 1: stxa %g0, [%o0 + %o3] ASI_DMMU_DEMAP | ||
98 | stxa %g0, [%o0 + %o3] ASI_IMMU_DEMAP | ||
99 | membar #Sync | ||
100 | brnz,pt %o3, 1b | ||
101 | sub %o3, %o4, %o3 | ||
102 | 2: sethi %hi(KERNBASE), %o3 | ||
103 | flush %o3 | ||
104 | retl | ||
105 | nop | ||
106 | nop | ||
107 | |||
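Unrolled from its delay slots, the loop above visits every page in [start, end) from the last page down to the first, issuing a nucleus-context demap for each. A rough C rendering, assuming 8K pages; demap() is a hypothetical stand-in for the paired ASI_DMMU_DEMAP/ASI_IMMU_DEMAP stores:

#include <stdio.h>

#define PAGE_SIZE 8192UL

static void demap(unsigned long addr) { (void)addr; }   /* stub */

static void flush_kernel_range(unsigned long start, unsigned long end)
{
        unsigned long off;

        if (start == end)
                return;
        for (off = end - start - PAGE_SIZE; ; off -= PAGE_SIZE) {
                demap((start | 0x20) + off);    /* 0x20 selects nucleus */
                if (off == 0)
                        break;
        }
}

int main(void)
{
        flush_kernel_range(0x100000UL, 0x104000UL);  /* two 8K pages */
        return 0;
}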
108 | __spitfire_flush_tlb_mm_slow: | ||
109 | rdpr %pstate, %g1 | ||
110 | wrpr %g1, PSTATE_IE, %pstate | ||
111 | stxa %o0, [%o1] ASI_DMMU | ||
112 | stxa %g0, [%g3] ASI_DMMU_DEMAP | ||
113 | stxa %g0, [%g3] ASI_IMMU_DEMAP | ||
114 | flush %g6 | ||
115 | stxa %g2, [%o1] ASI_DMMU | ||
116 | sethi %hi(KERNBASE), %o1 | ||
117 | flush %o1 | ||
118 | retl | ||
119 | wrpr %g1, 0, %pstate | ||
120 | |||
121 | /* | ||
122 | * The following code flushes one page_size worth. | ||
123 | */ | ||
124 | .section .kprobes.text, "ax" | ||
125 | .align 32 | ||
126 | .globl __flush_icache_page | ||
127 | __flush_icache_page: /* %o0 = phys_page */ | ||
128 | srlx %o0, PAGE_SHIFT, %o0 | ||
129 | sethi %uhi(PAGE_OFFSET), %g1 | ||
130 | sllx %o0, PAGE_SHIFT, %o0 | ||
131 | sethi %hi(PAGE_SIZE), %g2 | ||
132 | sllx %g1, 32, %g1 | ||
133 | add %o0, %g1, %o0 | ||
134 | 1: subcc %g2, 32, %g2 | ||
135 | bne,pt %icc, 1b | ||
136 | flush %o0 + %g2 | ||
137 | retl | ||
138 | nop | ||
139 | |||
140 | #ifdef DCACHE_ALIASING_POSSIBLE | ||
141 | |||
142 | #if (PAGE_SHIFT != 13) | ||
143 | #error only page shift of 13 is supported by dcache flush | ||
144 | #endif | ||
145 | |||
146 | #define DTAG_MASK 0x3 | ||
147 | |||
148 | /* This routine is Spitfire specific so the hardcoded | ||
149 | * D-cache size and line-size are OK. | ||
150 | */ | ||
151 | .align 64 | ||
152 | .globl __flush_dcache_page | ||
153 | __flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */ | ||
154 | sethi %uhi(PAGE_OFFSET), %g1 | ||
155 | sllx %g1, 32, %g1 | ||
156 | sub %o0, %g1, %o0 ! physical address | ||
157 | srlx %o0, 11, %o0 ! make D-cache TAG | ||
158 | sethi %hi(1 << 14), %o2 ! D-cache size | ||
159 | sub %o2, (1 << 5), %o2 ! D-cache line size | ||
160 | 1: ldxa [%o2] ASI_DCACHE_TAG, %o3 ! load D-cache TAG | ||
161 | andcc %o3, DTAG_MASK, %g0 ! Valid? | ||
162 | be,pn %xcc, 2f ! Nope, branch | ||
163 | andn %o3, DTAG_MASK, %o3 ! Clear valid bits | ||
164 | cmp %o3, %o0 ! TAG match? | ||
165 | bne,pt %xcc, 2f ! Nope, branch | ||
166 | nop | ||
167 | stxa %g0, [%o2] ASI_DCACHE_TAG ! Invalidate TAG | ||
168 | membar #Sync | ||
169 | 2: brnz,pt %o2, 1b | ||
170 | sub %o2, (1 << 5), %o2 ! D-cache line size | ||
171 | |||
172 | /* The I-cache does not snoop local stores so we | ||
173 | * better flush that too when necessary. | ||
174 | */ | ||
175 | brnz,pt %o1, __flush_icache_page | ||
176 | sllx %o0, 11, %o0 | ||
177 | retl | ||
178 | nop | ||
179 | |||
180 | #endif /* DCACHE_ALIASING_POSSIBLE */ | ||
181 | |||
182 | .previous | ||
183 | |||
184 | /* Cheetah specific versions, patched at boot time. */ | ||
185 | __cheetah_flush_tlb_mm: /* 19 insns */ | ||
186 | rdpr %pstate, %g7 | ||
187 | andn %g7, PSTATE_IE, %g2 | ||
188 | wrpr %g2, 0x0, %pstate | ||
189 | wrpr %g0, 1, %tl | ||
190 | mov PRIMARY_CONTEXT, %o2 | ||
191 | mov 0x40, %g3 | ||
192 | ldxa [%o2] ASI_DMMU, %g2 | ||
193 | srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o1 | ||
194 | sllx %o1, CTX_PGSZ1_NUC_SHIFT, %o1 | ||
195 | or %o0, %o1, %o0 /* Preserve nucleus page size fields */ | ||
196 | stxa %o0, [%o2] ASI_DMMU | ||
197 | stxa %g0, [%g3] ASI_DMMU_DEMAP | ||
198 | stxa %g0, [%g3] ASI_IMMU_DEMAP | ||
199 | stxa %g2, [%o2] ASI_DMMU | ||
200 | sethi %hi(KERNBASE), %o2 | ||
201 | flush %o2 | ||
202 | wrpr %g0, 0, %tl | ||
203 | retl | ||
204 | wrpr %g7, 0x0, %pstate | ||
205 | |||
206 | __cheetah_flush_tlb_pending: /* 27 insns */ | ||
207 | /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ | ||
208 | rdpr %pstate, %g7 | ||
209 | sllx %o1, 3, %o1 | ||
210 | andn %g7, PSTATE_IE, %g2 | ||
211 | wrpr %g2, 0x0, %pstate | ||
212 | wrpr %g0, 1, %tl | ||
213 | mov PRIMARY_CONTEXT, %o4 | ||
214 | ldxa [%o4] ASI_DMMU, %g2 | ||
215 | srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o3 | ||
216 | sllx %o3, CTX_PGSZ1_NUC_SHIFT, %o3 | ||
217 | or %o0, %o3, %o0 /* Preserve nucleus page size fields */ | ||
218 | stxa %o0, [%o4] ASI_DMMU | ||
219 | 1: sub %o1, (1 << 3), %o1 | ||
220 | ldx [%o2 + %o1], %o3 | ||
221 | andcc %o3, 1, %g0 | ||
222 | be,pn %icc, 2f | ||
223 | andn %o3, 1, %o3 | ||
224 | stxa %g0, [%o3] ASI_IMMU_DEMAP | ||
225 | 2: stxa %g0, [%o3] ASI_DMMU_DEMAP | ||
226 | membar #Sync | ||
227 | brnz,pt %o1, 1b | ||
228 | nop | ||
229 | stxa %g2, [%o4] ASI_DMMU | ||
230 | sethi %hi(KERNBASE), %o4 | ||
231 | flush %o4 | ||
232 | wrpr %g0, 0, %tl | ||
233 | retl | ||
234 | wrpr %g7, 0x0, %pstate | ||
235 | |||
236 | #ifdef DCACHE_ALIASING_POSSIBLE | ||
237 | __cheetah_flush_dcache_page: /* 11 insns */ | ||
238 | sethi %uhi(PAGE_OFFSET), %g1 | ||
239 | sllx %g1, 32, %g1 | ||
240 | sub %o0, %g1, %o0 | ||
241 | sethi %hi(PAGE_SIZE), %o4 | ||
242 | 1: subcc %o4, (1 << 5), %o4 | ||
243 | stxa %g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE | ||
244 | membar #Sync | ||
245 | bne,pt %icc, 1b | ||
246 | nop | ||
247 | retl /* I-cache flush never needed on Cheetah, see callers. */ | ||
248 | nop | ||
249 | #endif /* DCACHE_ALIASING_POSSIBLE */ | ||
250 | |||
251 | /* Hypervisor specific versions, patched at boot time. */ | ||
252 | __hypervisor_tlb_tl0_error: | ||
253 | save %sp, -192, %sp | ||
254 | mov %i0, %o0 | ||
255 | call hypervisor_tlbop_error | ||
256 | mov %i1, %o1 | ||
257 | ret | ||
258 | restore | ||
259 | |||
260 | __hypervisor_flush_tlb_mm: /* 10 insns */ | ||
261 | mov %o0, %o2 /* ARG2: mmu context */ | ||
262 | mov 0, %o0 /* ARG0: CPU lists unimplemented */ | ||
263 | mov 0, %o1 /* ARG1: CPU lists unimplemented */ | ||
264 | mov HV_MMU_ALL, %o3 /* ARG3: flags */ | ||
265 | mov HV_FAST_MMU_DEMAP_CTX, %o5 | ||
266 | ta HV_FAST_TRAP | ||
267 | brnz,pn %o0, __hypervisor_tlb_tl0_error | ||
268 | mov HV_FAST_MMU_DEMAP_CTX, %o1 | ||
269 | retl | ||
270 | nop | ||
271 | |||
272 | __hypervisor_flush_tlb_pending: /* 16 insns */ | ||
273 | /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ | ||
274 | sllx %o1, 3, %g1 | ||
275 | mov %o2, %g2 | ||
276 | mov %o0, %g3 | ||
277 | 1: sub %g1, (1 << 3), %g1 | ||
278 | ldx [%g2 + %g1], %o0 /* ARG0: vaddr + IMMU-bit */ | ||
279 | mov %g3, %o1 /* ARG1: mmu context */ | ||
280 | mov HV_MMU_ALL, %o2 /* ARG2: flags */ | ||
281 | srlx %o0, PAGE_SHIFT, %o0 | ||
282 | sllx %o0, PAGE_SHIFT, %o0 | ||
283 | ta HV_MMU_UNMAP_ADDR_TRAP | ||
284 | brnz,pn %o0, __hypervisor_tlb_tl0_error | ||
285 | mov HV_MMU_UNMAP_ADDR_TRAP, %o1 | ||
286 | brnz,pt %g1, 1b | ||
287 | nop | ||
288 | retl | ||
289 | nop | ||
290 | |||
291 | __hypervisor_flush_tlb_kernel_range: /* 16 insns */ | ||
292 | /* %o0=start, %o1=end */ | ||
293 | cmp %o0, %o1 | ||
294 | be,pn %xcc, 2f | ||
295 | sethi %hi(PAGE_SIZE), %g3 | ||
296 | mov %o0, %g1 | ||
297 | sub %o1, %g1, %g2 | ||
298 | sub %g2, %g3, %g2 | ||
299 | 1: add %g1, %g2, %o0 /* ARG0: virtual address */ | ||
300 | mov 0, %o1 /* ARG1: mmu context */ | ||
301 | mov HV_MMU_ALL, %o2 /* ARG2: flags */ | ||
302 | ta HV_MMU_UNMAP_ADDR_TRAP | ||
303 | brnz,pn %o0, __hypervisor_tlb_tl0_error | ||
304 | mov HV_MMU_UNMAP_ADDR_TRAP, %o1 | ||
305 | brnz,pt %g2, 1b | ||
306 | sub %g2, %g3, %g2 | ||
307 | 2: retl | ||
308 | nop | ||
309 | |||
310 | #ifdef DCACHE_ALIASING_POSSIBLE | ||
311 | /* XXX Niagara and friends have an 8K cache, so no aliasing is | ||
312 | * XXX possible, but nothing explicit in the Hypervisor API | ||
313 | * XXX guarantees this. | ||
314 | */ | ||
315 | __hypervisor_flush_dcache_page: /* 2 insns */ | ||
316 | retl | ||
317 | nop | ||
318 | #endif | ||
319 | |||
320 | tlb_patch_one: | ||
321 | 1: lduw [%o1], %g1 | ||
322 | stw %g1, [%o0] | ||
323 | flush %o0 | ||
324 | subcc %o2, 1, %o2 | ||
325 | add %o1, 4, %o1 | ||
326 | bne,pt %icc, 1b | ||
327 | add %o0, 4, %o0 | ||
328 | retl | ||
329 | nop | ||
330 | |||
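tlb_patch_one copies %o2 instruction words from the chosen implementation over the generic entry point, flushing the I-cache as it goes; this is why every patchable routine above carries an insn-count comment that must stay in sync with the mov N, %o2 at its call sites. A C-level sketch of the idea, with flush_one() as a stubbed stand-in for the flush instruction:

#include <stdio.h>

static void flush_one(void *addr) { (void)addr; }   /* I-cache flush stub */

static void patch_routine(unsigned int *dst, const unsigned int *src,
                          int insns)
{
        int i;

        for (i = 0; i < insns; i++) {
                dst[i] = src[i];        /* overwrite one instruction word */
                flush_one(&dst[i]);     /* keep the I-cache coherent      */
        }
}

int main(void)
{
        unsigned int generic[4] = { 0 };
        unsigned int faster[4]  = { 1, 2, 3, 4 };

        patch_routine(generic, faster, 4);
        printf("%u %u %u %u\n", generic[0], generic[1], generic[2], generic[3]);
        return 0;
}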
331 | .globl cheetah_patch_cachetlbops | ||
332 | cheetah_patch_cachetlbops: | ||
333 | save %sp, -128, %sp | ||
334 | |||
335 | sethi %hi(__flush_tlb_mm), %o0 | ||
336 | or %o0, %lo(__flush_tlb_mm), %o0 | ||
337 | sethi %hi(__cheetah_flush_tlb_mm), %o1 | ||
338 | or %o1, %lo(__cheetah_flush_tlb_mm), %o1 | ||
339 | call tlb_patch_one | ||
340 | mov 19, %o2 | ||
341 | |||
342 | sethi %hi(__flush_tlb_pending), %o0 | ||
343 | or %o0, %lo(__flush_tlb_pending), %o0 | ||
344 | sethi %hi(__cheetah_flush_tlb_pending), %o1 | ||
345 | or %o1, %lo(__cheetah_flush_tlb_pending), %o1 | ||
346 | call tlb_patch_one | ||
347 | mov 27, %o2 | ||
348 | |||
349 | #ifdef DCACHE_ALIASING_POSSIBLE | ||
350 | sethi %hi(__flush_dcache_page), %o0 | ||
351 | or %o0, %lo(__flush_dcache_page), %o0 | ||
352 | sethi %hi(__cheetah_flush_dcache_page), %o1 | ||
353 | or %o1, %lo(__cheetah_flush_dcache_page), %o1 | ||
354 | call tlb_patch_one | ||
355 | mov 11, %o2 | ||
356 | #endif /* DCACHE_ALIASING_POSSIBLE */ | ||
357 | |||
358 | ret | ||
359 | restore | ||
360 | |||
361 | #ifdef CONFIG_SMP | ||
362 | /* These are all called by the slaves of a cross call, at | ||
363 | * trap level 1, with interrupts fully disabled. | ||
364 | * | ||
365 | * Register usage: | ||
366 | * %g5 mm->context (all tlb flushes) | ||
367 | * %g1 address arg 1 (tlb page and range flushes) | ||
368 | * %g7 address arg 2 (tlb range flush only) | ||
369 | * | ||
370 | * %g6 scratch 1 | ||
371 | * %g2 scratch 2 | ||
372 | * %g3 scratch 3 | ||
373 | * %g4 scratch 4 | ||
374 | */ | ||
375 | .align 32 | ||
376 | .globl xcall_flush_tlb_mm | ||
377 | xcall_flush_tlb_mm: /* 21 insns */ | ||
378 | mov PRIMARY_CONTEXT, %g2 | ||
379 | ldxa [%g2] ASI_DMMU, %g3 | ||
380 | srlx %g3, CTX_PGSZ1_NUC_SHIFT, %g4 | ||
381 | sllx %g4, CTX_PGSZ1_NUC_SHIFT, %g4 | ||
382 | or %g5, %g4, %g5 /* Preserve nucleus page size fields */ | ||
383 | stxa %g5, [%g2] ASI_DMMU | ||
384 | mov 0x40, %g4 | ||
385 | stxa %g0, [%g4] ASI_DMMU_DEMAP | ||
386 | stxa %g0, [%g4] ASI_IMMU_DEMAP | ||
387 | stxa %g3, [%g2] ASI_DMMU | ||
388 | retry | ||
389 | nop | ||
390 | nop | ||
391 | nop | ||
392 | nop | ||
393 | nop | ||
394 | nop | ||
395 | nop | ||
396 | nop | ||
397 | nop | ||
398 | nop | ||
399 | |||
400 | .globl xcall_flush_tlb_pending | ||
401 | xcall_flush_tlb_pending: /* 21 insns */ | ||
402 | /* %g5=context, %g1=nr, %g7=vaddrs[] */ | ||
403 | sllx %g1, 3, %g1 | ||
404 | mov PRIMARY_CONTEXT, %g4 | ||
405 | ldxa [%g4] ASI_DMMU, %g2 | ||
406 | srlx %g2, CTX_PGSZ1_NUC_SHIFT, %g4 | ||
407 | sllx %g4, CTX_PGSZ1_NUC_SHIFT, %g4 | ||
408 | or %g5, %g4, %g5 | ||
409 | mov PRIMARY_CONTEXT, %g4 | ||
410 | stxa %g5, [%g4] ASI_DMMU | ||
411 | 1: sub %g1, (1 << 3), %g1 | ||
412 | ldx [%g7 + %g1], %g5 | ||
413 | andcc %g5, 0x1, %g0 | ||
414 | be,pn %icc, 2f | ||
415 | |||
416 | andn %g5, 0x1, %g5 | ||
417 | stxa %g0, [%g5] ASI_IMMU_DEMAP | ||
418 | 2: stxa %g0, [%g5] ASI_DMMU_DEMAP | ||
419 | membar #Sync | ||
420 | brnz,pt %g1, 1b | ||
421 | nop | ||
422 | stxa %g2, [%g4] ASI_DMMU | ||
423 | retry | ||
424 | nop | ||
425 | |||
426 | .globl xcall_flush_tlb_kernel_range | ||
427 | xcall_flush_tlb_kernel_range: /* 25 insns */ | ||
428 | sethi %hi(PAGE_SIZE - 1), %g2 | ||
429 | or %g2, %lo(PAGE_SIZE - 1), %g2 | ||
430 | andn %g1, %g2, %g1 | ||
431 | andn %g7, %g2, %g7 | ||
432 | sub %g7, %g1, %g3 | ||
433 | add %g2, 1, %g2 | ||
434 | sub %g3, %g2, %g3 | ||
435 | or %g1, 0x20, %g1 ! Nucleus | ||
436 | 1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP | ||
437 | stxa %g0, [%g1 + %g3] ASI_IMMU_DEMAP | ||
438 | membar #Sync | ||
439 | brnz,pt %g3, 1b | ||
440 | sub %g3, %g2, %g3 | ||
441 | retry | ||
442 | nop | ||
443 | nop | ||
444 | nop | ||
445 | nop | ||
446 | nop | ||
447 | nop | ||
448 | nop | ||
449 | nop | ||
450 | nop | ||
451 | nop | ||
452 | nop | ||
453 | |||
454 | /* This runs in a very controlled environment, so we do | ||
455 | * not need to worry about BH races etc. | ||
456 | */ | ||
457 | .globl xcall_sync_tick | ||
458 | xcall_sync_tick: | ||
459 | |||
460 | 661: rdpr %pstate, %g2 | ||
461 | wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate | ||
462 | .section .sun4v_2insn_patch, "ax" | ||
463 | .word 661b | ||
464 | nop | ||
465 | nop | ||
466 | .previous | ||
467 | |||
468 | rdpr %pil, %g2 | ||
469 | wrpr %g0, 15, %pil | ||
470 | sethi %hi(109f), %g7 | ||
471 | b,pt %xcc, etrap_irq | ||
472 | 109: or %g7, %lo(109b), %g7 | ||
473 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
474 | call trace_hardirqs_off | ||
475 | nop | ||
476 | #endif | ||
477 | call smp_synchronize_tick_client | ||
478 | nop | ||
479 | b rtrap_xcall | ||
480 | ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1 | ||
481 | |||
482 | .globl xcall_fetch_glob_regs | ||
483 | xcall_fetch_glob_regs: | ||
484 | sethi %hi(global_reg_snapshot), %g1 | ||
485 | or %g1, %lo(global_reg_snapshot), %g1 | ||
486 | __GET_CPUID(%g2) | ||
487 | sllx %g2, 6, %g3 | ||
488 | add %g1, %g3, %g1 | ||
489 | rdpr %tstate, %g7 | ||
490 | stx %g7, [%g1 + GR_SNAP_TSTATE] | ||
491 | rdpr %tpc, %g7 | ||
492 | stx %g7, [%g1 + GR_SNAP_TPC] | ||
493 | rdpr %tnpc, %g7 | ||
494 | stx %g7, [%g1 + GR_SNAP_TNPC] | ||
495 | stx %o7, [%g1 + GR_SNAP_O7] | ||
496 | stx %i7, [%g1 + GR_SNAP_I7] | ||
497 | /* Don't try this at home, kids... | ||
498 | rdpr %cwp, %g2 | ||
499 | sub %g2, 1, %g7 | ||
500 | wrpr %g7, %cwp | ||
501 | mov %i7, %g7 | ||
502 | wrpr %g2, %cwp | ||
503 | stx %g7, [%g1 + GR_SNAP_RPC] | ||
504 | sethi %hi(trap_block), %g7 | ||
505 | or %g7, %lo(trap_block), %g7 | ||
506 | sllx %g2, TRAP_BLOCK_SZ_SHIFT, %g2 | ||
507 | add %g7, %g2, %g7 | ||
508 | ldx [%g7 + TRAP_PER_CPU_THREAD], %g3 | ||
509 | stx %g3, [%g1 + GR_SNAP_THREAD] | ||
510 | retry | ||
511 | |||
512 | #ifdef DCACHE_ALIASING_POSSIBLE | ||
513 | .align 32 | ||
514 | .globl xcall_flush_dcache_page_cheetah | ||
515 | xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */ | ||
516 | sethi %hi(PAGE_SIZE), %g3 | ||
517 | 1: subcc %g3, (1 << 5), %g3 | ||
518 | stxa %g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE | ||
519 | membar #Sync | ||
520 | bne,pt %icc, 1b | ||
521 | nop | ||
522 | retry | ||
523 | nop | ||
524 | #endif /* DCACHE_ALIASING_POSSIBLE */ | ||
525 | |||
526 | .globl xcall_flush_dcache_page_spitfire | ||
527 | xcall_flush_dcache_page_spitfire: /* %g1 == physical page address | ||
528 | %g7 == kernel page virtual address | ||
529 | %g5 == (page->mapping != NULL) */ | ||
530 | #ifdef DCACHE_ALIASING_POSSIBLE | ||
531 | srlx %g1, (13 - 2), %g1 ! Form tag comparator | ||
532 | sethi %hi(L1DCACHE_SIZE), %g3 ! D$ size == 16K | ||
533 | sub %g3, (1 << 5), %g3 ! D$ linesize == 32 | ||
534 | 1: ldxa [%g3] ASI_DCACHE_TAG, %g2 | ||
535 | andcc %g2, 0x3, %g0 | ||
536 | be,pn %xcc, 2f | ||
537 | andn %g2, 0x3, %g2 | ||
538 | cmp %g2, %g1 | ||
539 | |||
540 | bne,pt %xcc, 2f | ||
541 | nop | ||
542 | stxa %g0, [%g3] ASI_DCACHE_TAG | ||
543 | membar #Sync | ||
544 | 2: cmp %g3, 0 | ||
545 | bne,pt %xcc, 1b | ||
546 | sub %g3, (1 << 5), %g3 | ||
547 | |||
548 | brz,pn %g5, 2f | ||
549 | #endif /* DCACHE_ALIASING_POSSIBLE */ | ||
550 | sethi %hi(PAGE_SIZE), %g3 | ||
551 | |||
552 | 1: flush %g7 | ||
553 | subcc %g3, (1 << 5), %g3 | ||
554 | bne,pt %icc, 1b | ||
555 | add %g7, (1 << 5), %g7 | ||
556 | |||
557 | 2: retry | ||
558 | nop | ||
559 | nop | ||
560 | |||
561 | /* %g5: error | ||
562 | * %g6: tlb op | ||
563 | */ | ||
564 | __hypervisor_tlb_xcall_error: | ||
565 | mov %g5, %g4 | ||
566 | mov %g6, %g5 | ||
567 | ba,pt %xcc, etrap | ||
568 | rd %pc, %g7 | ||
569 | mov %l4, %o0 | ||
570 | call hypervisor_tlbop_error_xcall | ||
571 | mov %l5, %o1 | ||
572 | ba,a,pt %xcc, rtrap | ||
573 | |||
574 | .globl __hypervisor_xcall_flush_tlb_mm | ||
575 | __hypervisor_xcall_flush_tlb_mm: /* 21 insns */ | ||
576 | /* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */ | ||
577 | mov %o0, %g2 | ||
578 | mov %o1, %g3 | ||
579 | mov %o2, %g4 | ||
580 | mov %o3, %g1 | ||
581 | mov %o5, %g7 | ||
582 | clr %o0 /* ARG0: CPU lists unimplemented */ | ||
583 | clr %o1 /* ARG1: CPU lists unimplemented */ | ||
584 | mov %g5, %o2 /* ARG2: mmu context */ | ||
585 | mov HV_MMU_ALL, %o3 /* ARG3: flags */ | ||
586 | mov HV_FAST_MMU_DEMAP_CTX, %o5 | ||
587 | ta HV_FAST_TRAP | ||
588 | mov HV_FAST_MMU_DEMAP_CTX, %g6 | ||
589 | brnz,pn %o0, __hypervisor_tlb_xcall_error | ||
590 | mov %o0, %g5 | ||
591 | mov %g2, %o0 | ||
592 | mov %g3, %o1 | ||
593 | mov %g4, %o2 | ||
594 | mov %g1, %o3 | ||
595 | mov %g7, %o5 | ||
596 | membar #Sync | ||
597 | retry | ||
598 | |||
599 | .globl __hypervisor_xcall_flush_tlb_pending | ||
600 | __hypervisor_xcall_flush_tlb_pending: /* 21 insns */ | ||
601 | /* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */ | ||
602 | sllx %g1, 3, %g1 | ||
603 | mov %o0, %g2 | ||
604 | mov %o1, %g3 | ||
605 | mov %o2, %g4 | ||
606 | 1: sub %g1, (1 << 3), %g1 | ||
607 | ldx [%g7 + %g1], %o0 /* ARG0: virtual address */ | ||
608 | mov %g5, %o1 /* ARG1: mmu context */ | ||
609 | mov HV_MMU_ALL, %o2 /* ARG2: flags */ | ||
610 | srlx %o0, PAGE_SHIFT, %o0 | ||
611 | sllx %o0, PAGE_SHIFT, %o0 | ||
612 | ta HV_MMU_UNMAP_ADDR_TRAP | ||
613 | mov HV_MMU_UNMAP_ADDR_TRAP, %g6 | ||
614 | brnz,a,pn %o0, __hypervisor_tlb_xcall_error | ||
615 | mov %o0, %g5 | ||
616 | brnz,pt %g1, 1b | ||
617 | nop | ||
618 | mov %g2, %o0 | ||
619 | mov %g3, %o1 | ||
620 | mov %g4, %o2 | ||
621 | membar #Sync | ||
622 | retry | ||
623 | |||
624 | .globl __hypervisor_xcall_flush_tlb_kernel_range | ||
625 | __hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */ | ||
626 | /* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */ | ||
627 | sethi %hi(PAGE_SIZE - 1), %g2 | ||
628 | or %g2, %lo(PAGE_SIZE - 1), %g2 | ||
629 | andn %g1, %g2, %g1 | ||
630 | andn %g7, %g2, %g7 | ||
631 | sub %g7, %g1, %g3 | ||
632 | add %g2, 1, %g2 | ||
633 | sub %g3, %g2, %g3 | ||
634 | mov %o0, %g2 | ||
635 | mov %o1, %g4 | ||
636 | mov %o2, %g7 | ||
637 | 1: add %g1, %g3, %o0 /* ARG0: virtual address */ | ||
638 | mov 0, %o1 /* ARG1: mmu context */ | ||
639 | mov HV_MMU_ALL, %o2 /* ARG2: flags */ | ||
640 | ta HV_MMU_UNMAP_ADDR_TRAP | ||
641 | mov HV_MMU_UNMAP_ADDR_TRAP, %g6 | ||
642 | brnz,pn %o0, __hypervisor_tlb_xcall_error | ||
643 | mov %o0, %g5 | ||
644 | sethi %hi(PAGE_SIZE), %o2 | ||
645 | brnz,pt %g3, 1b | ||
646 | sub %g3, %o2, %g3 | ||
647 | mov %g2, %o0 | ||
648 | mov %g4, %o1 | ||
649 | mov %g7, %o2 | ||
650 | membar #Sync | ||
651 | retry | ||
652 | |||
653 | /* These just get rescheduled to PIL vectors. */ | ||
654 | .globl xcall_call_function | ||
655 | xcall_call_function: | ||
656 | wr %g0, (1 << PIL_SMP_CALL_FUNC), %set_softint | ||
657 | retry | ||
658 | |||
659 | .globl xcall_call_function_single | ||
660 | xcall_call_function_single: | ||
661 | wr %g0, (1 << PIL_SMP_CALL_FUNC_SNGL), %set_softint | ||
662 | retry | ||
663 | |||
664 | .globl xcall_receive_signal | ||
665 | xcall_receive_signal: | ||
666 | wr %g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint | ||
667 | retry | ||
668 | |||
669 | .globl xcall_capture | ||
670 | xcall_capture: | ||
671 | wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint | ||
672 | retry | ||
673 | |||
674 | .globl xcall_new_mmu_context_version | ||
675 | xcall_new_mmu_context_version: | ||
676 | wr %g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint | ||
677 | retry | ||
678 | |||
679 | #ifdef CONFIG_KGDB | ||
680 | .globl xcall_kgdb_capture | ||
681 | xcall_kgdb_capture: | ||
682 | 661: rdpr %pstate, %g2 | ||
683 | wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate | ||
684 | .section .sun4v_2insn_patch, "ax" | ||
685 | .word 661b | ||
686 | nop | ||
687 | nop | ||
688 | .previous | ||
689 | |||
690 | rdpr %pil, %g2 | ||
691 | wrpr %g0, 15, %pil | ||
692 | sethi %hi(109f), %g7 | ||
693 | ba,pt %xcc, etrap_irq | ||
694 | 109: or %g7, %lo(109b), %g7 | ||
695 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
696 | call trace_hardirqs_off | ||
697 | nop | ||
698 | #endif | ||
699 | call smp_kgdb_capture_client | ||
700 | add %sp, PTREGS_OFF, %o0 | ||
701 | /* Has to be a non-v9 branch due to the large distance. */ | ||
702 | ba rtrap_xcall | ||
703 | ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1 | ||
704 | #endif | ||
705 | |||
706 | #endif /* CONFIG_SMP */ | ||
707 | |||
708 | |||
709 | .globl hypervisor_patch_cachetlbops | ||
710 | hypervisor_patch_cachetlbops: | ||
711 | save %sp, -128, %sp | ||
712 | |||
713 | sethi %hi(__flush_tlb_mm), %o0 | ||
714 | or %o0, %lo(__flush_tlb_mm), %o0 | ||
715 | sethi %hi(__hypervisor_flush_tlb_mm), %o1 | ||
716 | or %o1, %lo(__hypervisor_flush_tlb_mm), %o1 | ||
717 | call tlb_patch_one | ||
718 | mov 10, %o2 | ||
719 | |||
720 | sethi %hi(__flush_tlb_pending), %o0 | ||
721 | or %o0, %lo(__flush_tlb_pending), %o0 | ||
722 | sethi %hi(__hypervisor_flush_tlb_pending), %o1 | ||
723 | or %o1, %lo(__hypervisor_flush_tlb_pending), %o1 | ||
724 | call tlb_patch_one | ||
725 | mov 16, %o2 | ||
726 | |||
727 | sethi %hi(__flush_tlb_kernel_range), %o0 | ||
728 | or %o0, %lo(__flush_tlb_kernel_range), %o0 | ||
729 | sethi %hi(__hypervisor_flush_tlb_kernel_range), %o1 | ||
730 | or %o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1 | ||
731 | call tlb_patch_one | ||
732 | mov 16, %o2 | ||
733 | |||
734 | #ifdef DCACHE_ALIASING_POSSIBLE | ||
735 | sethi %hi(__flush_dcache_page), %o0 | ||
736 | or %o0, %lo(__flush_dcache_page), %o0 | ||
737 | sethi %hi(__hypervisor_flush_dcache_page), %o1 | ||
738 | or %o1, %lo(__hypervisor_flush_dcache_page), %o1 | ||
739 | call tlb_patch_one | ||
740 | mov 2, %o2 | ||
741 | #endif /* DCACHE_ALIASING_POSSIBLE */ | ||
742 | |||
743 | #ifdef CONFIG_SMP | ||
744 | sethi %hi(xcall_flush_tlb_mm), %o0 | ||
745 | or %o0, %lo(xcall_flush_tlb_mm), %o0 | ||
746 | sethi %hi(__hypervisor_xcall_flush_tlb_mm), %o1 | ||
747 | or %o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1 | ||
748 | call tlb_patch_one | ||
749 | mov 21, %o2 | ||
750 | |||
751 | sethi %hi(xcall_flush_tlb_pending), %o0 | ||
752 | or %o0, %lo(xcall_flush_tlb_pending), %o0 | ||
753 | sethi %hi(__hypervisor_xcall_flush_tlb_pending), %o1 | ||
754 | or %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1 | ||
755 | call tlb_patch_one | ||
756 | mov 21, %o2 | ||
757 | |||
758 | sethi %hi(xcall_flush_tlb_kernel_range), %o0 | ||
759 | or %o0, %lo(xcall_flush_tlb_kernel_range), %o0 | ||
760 | sethi %hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1 | ||
761 | or %o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1 | ||
762 | call tlb_patch_one | ||
763 | mov 25, %o2 | ||
764 | #endif /* CONFIG_SMP */ | ||
765 | |||
766 | ret | ||
767 | restore | ||