Diffstat (limited to 'arch/frv/mm/fault.c')
 -rw-r--r--  arch/frv/mm/fault.c | 325
 1 file changed, 325 insertions, 0 deletions
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
new file mode 100644
index 000000000000..41d02ac48233
--- /dev/null
+++ b/arch/frv/mm/fault.c
@@ -0,0 +1,325 @@
/*
 * linux/arch/frv/mm/fault.c
 *
 * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
 * - Written by David Howells (dhowells@redhat.com)
 * - Derived from arch/m68knommu/mm/fault.c
 * - Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>,
 * - Copyright (C) 2000 Lineo, Inc. (www.lineo.com)
 *
 * Based on:
 *
 * linux/arch/m68k/mm/fault.c
 *
 * Copyright (C) 1995 Hamish Macdonald
 */

#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/hardirq.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/gdb-stub.h>

/*****************************************************************************/
/*
 * This routine handles page faults. It determines the problem, and
 * then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear0)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long _pme, lrai, lrad, fixup;
	siginfo_t info;
	pgd_t *pge;
	pud_t *pue;
	pte_t *pte;
	int write;

#if 0
	const char *atxc[16] = {
		[0x0] = "mmu-miss", [0x8] = "multi-dat", [0x9] = "multi-sat",
		[0xa] = "tlb-miss", [0xc] = "privilege", [0xd] = "write-prot",
	};

	printk("do_page_fault(%d,%lx [%s],%lx)\n",
	       datammu, esr0, atxc[esr0 >> 20 & 0xf], ear0);
#endif

	mm = current->mm;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * and that the fault was a page not present (invalid) error
	 */
	if (!user_mode(__frame) && (esr0 & ESR0_ATXC) == ESR0_ATXC_AMRTLB_MISS) {
		if (ear0 >= VMALLOC_START && ear0 < VMALLOC_END)
			goto kernel_pte_fault;
		if (ear0 >= PKMAP_BASE && ear0 < PKMAP_END)
			goto kernel_pte_fault;
	}

	info.si_code = SEGV_MAPERR;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_interrupt() || !mm)
		goto no_context;

	down_read(&mm->mmap_sem);

	vma = find_vma(mm, ear0);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= ear0)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;

	if (user_mode(__frame)) {
		/*
		 * Accessing the stack below the stack pointer is always a
		 * bug.  Allow a slack of two pages below the stack pointer
		 * to cover instructions that adjust the stack before the
		 * access shows up as a fault.
		 */
		if ((ear0 & PAGE_MASK) + 2 * PAGE_SIZE < __frame->sp) {
#if 0
			printk("[%d] ### Access below stack @%lx (sp=%lx)\n",
			       current->pid, ear0, __frame->sp);
			show_registers(__frame);
			printk("[%d] ### Code: [%08lx] %02x %02x %02x %02x %02x %02x %02x %02x\n",
			       current->pid,
			       __frame->pc,
			       ((u8*)__frame->pc)[0],
			       ((u8*)__frame->pc)[1],
			       ((u8*)__frame->pc)[2],
			       ((u8*)__frame->pc)[3],
			       ((u8*)__frame->pc)[4],
			       ((u8*)__frame->pc)[5],
			       ((u8*)__frame->pc)[6],
			       ((u8*)__frame->pc)[7]
			       );
#endif
			goto bad_area;
		}
	}

	if (expand_stack(vma, ear0))
		goto bad_area;

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
 good_area:
	info.si_code = SEGV_ACCERR;
	write = 0;
	switch (esr0 & ESR0_ATXC) {
	default:
		/* handle write to write protected page */
	case ESR0_ATXC_WP_EXCEP:
#ifdef TEST_VERIFY_AREA
		if (!(user_mode(__frame)))
			printk("WP fault at %08lx\n", __frame->pc);
#endif
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		write = 1;
		break;

		/* handle read from protected page */
	case ESR0_ATXC_PRIV_EXCEP:
		goto bad_area;

		/* handle read, write or exec on absent page
		 * - can't support write without permitting read
		 * - don't support execute without permitting read and vice-versa
		 */
	case ESR0_ATXC_AMRTLB_MISS:
		if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
			goto bad_area;
		break;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	switch (handle_mm_fault(mm, vma, ear0, write)) {
	case 1:
		current->min_flt++;
		break;
	case 2:
		current->maj_flt++;
		break;
	case 0:
		goto do_sigbus;
	default:
		goto out_of_memory;
	}

	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
 bad_area:
	up_read(&mm->mmap_sem);

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(__frame)) {
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void *) ear0;
		force_sig_info(SIGSEGV, &info, current);
		return;
	}

 no_context:
	/* are we prepared to handle this kernel fault? */
	if ((fixup = search_exception_table(__frame->pc)) != 0) {
		__frame->pc = fixup;
		return;
	}

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */

	bust_spinlocks(1);

	if (ear0 < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at virtual addr %08lx\n", ear0);
	printk(" PC : %08lx\n", __frame->pc);
	printk(" EXC : esr0=%08lx ear0=%08lx\n", esr0, ear0);

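	/* probe the instruction-side and data-side MMU lookups for the
	 * faulting address; the results are dumped below as part of the
	 * oops report
	 */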
	asm("lrai %1,%0,#1,#0,#0" : "=&r"(lrai) : "r"(ear0));
	asm("lrad %1,%0,#1,#0,#0" : "=&r"(lrad) : "r"(ear0));

	printk(KERN_ALERT " LRAI: %08lx\n", lrai);
	printk(KERN_ALERT " LRAD: %08lx\n", lrad);

	__break_hijack_kernel_event();

	pge = pgd_offset(current->mm, ear0);
	pue = pud_offset(pge, ear0);
	_pme = pue->pue[0].ste[0];

	printk(KERN_ALERT " PGE : %8p { PME %08lx }\n", pge, _pme);

	if (_pme & xAMPRx_V) {
		unsigned long dampr, damlr, val;

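		/* borrow DAMPR2 to get at the PTEs: save its old value, point
		 * it at the page indicated by the PME, pick up the virtual
		 * address it maps to from DAMLR2, read the PTE and then
		 * restore the old DAMPR2 contents
		 */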
		asm volatile("movsg dampr2,%0 ! movgs %2,dampr2 ! movsg damlr2,%1"
			     : "=&r"(dampr), "=r"(damlr)
			     : "r" (_pme | xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V)
			     );

		pte = (pte_t *) damlr + __pte_index(ear0);
		val = pte_val(*pte);

		asm volatile("movgs %0,dampr2" :: "r" (dampr));

		printk(KERN_ALERT " PTE : %8p { %08lx }\n", pte, val);
	}

	die_if_kernel("Oops\n");
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
 out_of_memory:
	up_read(&mm->mmap_sem);
	printk("VM: killing process %s\n", current->comm);
	if (user_mode(__frame))
		do_exit(SIGKILL);
	goto no_context;

 do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *) ear0;
	force_sig_info(SIGBUS, &info, current);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(__frame))
		goto no_context;
	return;

/*
 * The fault was caused by a kernel PTE (such as installed by vmalloc or kmap)
 */
 kernel_pte_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int index = pgd_index(ear0);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

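		/* __get_TTBR() hands back the physical address of the page
		 * directory currently loaded into the MMU; convert it to a
		 * virtual pointer and index both it and the kernel's
		 * reference page directory (init_mm.pgd)
		 */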
		pgd = (pgd_t *) __get_TTBR();
		pgd = (pgd_t *)__va(pgd) + index;
		pgd_k = ((pgd_t *)(init_mm.pgd)) + index;

		if (!pgd_present(*pgd_k))
			goto no_context;
		//set_pgd(pgd, *pgd_k); /////// gcc ICE's on this line

		pud_k = pud_offset(pgd_k, ear0);
		if (!pud_present(*pud_k))
			goto no_context;

		pmd_k = pmd_offset(pud_k, ear0);
		if (!pmd_present(*pmd_k))
			goto no_context;

		pud = pud_offset(pgd, ear0);
		pmd = pmd_offset(pud, ear0);
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, ear0);
		if (!pte_present(*pte_k))
			goto no_context;
		return;
	}
} /* end do_page_fault() */