author     Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-22 10:38:37 -0500
committer  Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-22 10:38:37 -0500
commit     fcc9d2e5a6c89d22b8b773a64fb4ad21ac318446 (patch)
tree       a57612d1888735a2ec7972891b68c1ac5ec8faea /arch/sh/mm
parent     8dea78da5cee153b8af9c07a2745f6c55057fe12 (diff)

Diffstat (limited to 'arch/sh/mm')

 -rw-r--r--  arch/sh/mm/fault_32.c  374
 -rw-r--r--  arch/sh/mm/fault_64.c  266

 2 files changed, 640 insertions, 0 deletions
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
new file mode 100644
index 00000000000..7bebd044f2a
--- /dev/null
+++ b/arch/sh/mm/fault_32.c
@@ -0,0 +1,374 @@
/*
 * Page fault handler for SH with an MMU.
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2003 - 2009 Paul Mundt
 *
 * Based on linux/arch/i386/mm/fault.c:
 *  Copyright (C) 1995 Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <asm/io_trapped.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	int ret = 0;

	if (kprobes_built_in() && !user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, trap))
			ret = 1;
		preempt_enable();
	}

	return ret;
}

static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else {
		/*
		 * The page tables are fully synchronised so there must
		 * be another reason for the fault. Return NULL here to
		 * signal that we have not taken care of the fault.
		 */
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
		return NULL;
	}

	return pmd_k;
}

/*
 * Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd_k;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc/module/P3 area: */
	if (!(address >= VMALLOC_START && address < P3_ADDR_MAX))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_k = get_TTB();
	pmd_k = vmalloc_sync_one(pgd_k, address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}

static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE;
}

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
					unsigned long writeaccess,
					unsigned long address)
{
	unsigned long vec;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int si_code;
	int fault;
	siginfo_t info;

	tsk = current;
	mm = tsk->mm;
	si_code = SEGV_MAPERR;
	vec = lookup_exception_vector();

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (vmalloc_fault(address) >= 0)
			return;
		if (notify_page_fault(regs, vec))
			return;

		goto bad_area_nosemaphore;
	}

	if (unlikely(notify_page_fault(regs, vec)))
		return;

	/* Only enable interrupts if they were on before the fault */
	if ((regs->sr & SR_IMASK) != SR_IMASK)
		local_irq_enable();

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/*
	 * If we're in an interrupt, have no user context or are running
	 * in an atomic region then we must not take the fault:
	 */
	if (in_atomic() || !mm)
		goto no_context;

	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	si_code = SEGV_ACCERR;
	if (writeaccess) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR) {
		tsk->maj_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
			      regs, address);
	} else {
		tsk->min_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
			      regs, address);
	}

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	if (user_mode(regs)) {
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code = si_code;
		info.si_addr = (void *) address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	if (handle_trapped_io(regs, address))
		return;
	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */

	bust_spinlocks(1);

	if (oops_may_print()) {
		unsigned long page;

		if (address < PAGE_SIZE)
			printk(KERN_ALERT "Unable to handle kernel NULL "
					  "pointer dereference");
		else
			printk(KERN_ALERT "Unable to handle kernel paging "
					  "request");
		printk(" at virtual address %08lx\n", address);
		printk(KERN_ALERT "pc = %08lx\n", regs->pc);
		page = (unsigned long)get_TTB();
		if (page) {
			page = ((__typeof__(page) *)page)[address >> PGDIR_SHIFT];
			printk(KERN_ALERT "*pde = %08lx\n", page);
			if (page & _PAGE_PRESENT) {
				page &= PAGE_MASK;
				address &= 0x003ff000;
				page = ((__typeof__(page) *)
						__va(page))[address >>
							    PAGE_SHIFT];
				printk(KERN_ALERT "*pte = %08lx\n", page);
			}
		}
	}

	die("Oops", regs, writeaccess);
	bust_spinlocks(0);
	do_exit(SIGKILL);

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *)address;
	force_sig_info(SIGBUS, &info, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
}

/*
 * Called with interrupts disabled.
 */
asmlinkage int __kprobes
handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess,
	       unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	/*
	 * We don't take page faults for P1, P2, and parts of P4, these
	 * are always mapped, whether it be due to legacy behaviour in
	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
	 */
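	/*
	 * (Reference, not part of this patch: in the conventional 29-bit
	 * SuperH memory map, P0/U0 is 0x00000000-0x7fffffff (TLB-mapped
	 * user space), P1 is 0x80000000 (cached, unmapped), P2 is
	 * 0xa0000000 (uncached, unmapped), P3 is 0xc0000000 (TLB-mapped
	 * kernel/vmalloc) and P4 is 0xe0000000 (control space).)
	 */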
	if (address >= P3SEG && address < P3_ADDR_MAX) {
		pgd = pgd_offset_k(address);
	} else {
		if (unlikely(address >= TASK_SIZE || !current->mm))
			return 1;

		pgd = pgd_offset(current->mm, address);
	}

	pud = pud_offset(pgd, address);
	if (pud_none_or_clear_bad(pud))
		return 1;
	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 1;
	pte = pte_offset_kernel(pmd, address);
	entry = *pte;
	if (unlikely(pte_none(entry) || pte_not_present(entry)))
		return 1;
	if (unlikely(writeaccess && !pte_write(entry)))
		return 1;

	if (writeaccess)
		entry = pte_mkdirty(entry);
	entry = pte_mkyoung(entry);

	set_pte(pte, entry);

#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP)
	/*
	 * SH-4 does not set MMUCR.RC to the corresponding TLB entry in
	 * the case of an initial page write exception, so we need to
	 * flush it in order to avoid potential TLB entry duplication.
	 */
	if (writeaccess == 2)
		local_flush_tlb_one(get_asid(), address & PAGE_MASK);
#endif

	update_mmu_cache(NULL, address, pte);

	return 0;
}
diff --git a/arch/sh/mm/fault_64.c b/arch/sh/mm/fault_64.c
new file mode 100644
index 00000000000..2b356cec248
--- /dev/null
+++ b/arch/sh/mm/fault_64.c
@@ -0,0 +1,266 @@
/*
 * The SH64 TLB miss.
 *
 * Original code from fault.c
 * Copyright (C) 2000, 2001 Paolo Alberelli
 *
 * Fast PTE->TLB refill path
 * Copyright (C) 2003 Richard.Curnow@superh.com
 *
 * IMPORTANT NOTES :
 * The do_fast_page_fault function is called from a context in entry.S
 * where very few registers have been saved. In particular, the code in
 * this file must be compiled not to use ANY caller-save registers that
 * are not part of the restricted save set. Also, it means that code in
 * this file must not make calls to functions elsewhere in the kernel, or
 * else the excepting context will see corruption in its caller-save
 * registers. Plus, the entry.S save area is non-reentrant, so this code
 * has to run with SR.BL==1, i.e. no interrupts taken inside it and panic
 * on any exception.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <asm/system.h>
#include <asm/tlb.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <cpu/registers.h>

/* Callable from fault.c, so not static */
inline void __do_tlb_refill(unsigned long address,
			    unsigned long long is_text_not_data, pte_t *pte)
{
	unsigned long long ptel;
	unsigned long long pteh = 0;
	struct tlb_info *tlbp;
	unsigned long long next;

	/* Get PTEL first */
	ptel = pte_val(*pte);

	/*
	 * Set PTEH register
	 */
	pteh = neff_sign_extend(address & MMU_VPN_MASK);

	/* Set the ASID. */
	pteh |= get_asid() << PTEH_ASID_SHIFT;
	pteh |= PTEH_VALID;

	/* Set PTEL register, set_pte has performed the sign extension */
	ptel &= _PAGE_FLAGS_HARDWARE_MASK;	/* drop software flags */

	tlbp = is_text_not_data ? &(cpu_data->itlb) : &(cpu_data->dtlb);
	next = tlbp->next;
	__flush_tlb_slot(next);
	asm volatile ("putcfg %0,1,%2\n\n\t"
		      "putcfg %0,0,%1\n"
		      : : "r" (next), "r" (pteh), "r" (ptel));

	next += TLB_STEP;
	if (next > tlbp->last)
		next = tlbp->first;
	tlbp->next = next;
}

static int handle_vmalloc_fault(struct mm_struct *mm,
				unsigned long protection_flags,
				unsigned long long textaccess,
				unsigned long address)
{
	pgd_t *dir;
	pud_t *pud;
	pmd_t *pmd;
	static pte_t *pte;
	pte_t entry;

	dir = pgd_offset_k(address);

	pud = pud_offset(dir, address);
	if (pud_none_or_clear_bad(pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 0;

	pte = pte_offset_kernel(pmd, address);
	entry = *pte;

	if (pte_none(entry) || !pte_present(entry))
		return 0;
	if ((pte_val(entry) & protection_flags) != protection_flags)
		return 0;

	__do_tlb_refill(address, textaccess, pte);

	return 1;
}

static int handle_tlbmiss(struct mm_struct *mm,
			  unsigned long long protection_flags,
			  unsigned long long textaccess,
			  unsigned long address)
{
	pgd_t *dir;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	/* NB. The PGD currently only contains a single entry - there is no
	   page table tree stored for the top half of the address space since
	   virtual pages in that region should never be mapped in user mode.
	   (In kernel mode, the only things in that region are the 512Mb super
	   page (locked in), and vmalloc (modules) + I/O device pages (handled
	   by handle_vmalloc_fault), so no PGD for the upper half is required
	   by kernel mode either).

	   See how mm->pgd is allocated and initialised in pgd_alloc to see why
	   the next test is necessary. - RPC */
	if (address >= (unsigned long) TASK_SIZE)
		/* upper half - never has page table entries. */
		return 0;

	dir = pgd_offset(mm, address);
	if (pgd_none(*dir) || !pgd_present(*dir))
		return 0;
	if (!pgd_present(*dir))
		return 0;

	pud = pud_offset(dir, address);
	if (pud_none(*pud) || !pud_present(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || !pmd_present(*pmd))
		return 0;

	pte = pte_offset_kernel(pmd, address);
	entry = *pte;

	if (pte_none(entry) || !pte_present(entry))
		return 0;

	/*
	 * If the page doesn't have sufficient protection bits set to
	 * service the kind of fault being handled, there's not much
	 * point doing the TLB refill. Punt the fault to the general
	 * handler.
	 */
	if ((pte_val(entry) & protection_flags) != protection_flags)
		return 0;

	__do_tlb_refill(address, textaccess, pte);

	return 1;
}

/*
 * Put all this information into one structure so that everything is just
 * arithmetic relative to a single base address. This reduces the number
 * of movi/shori pairs needed just to load addresses of static data.
 */
struct expevt_lookup {
	unsigned short protection_flags[8];
	unsigned char  is_text_access[8];
	unsigned char  is_write_access[8];
};

#define PRU	(1<<9)
#define PRW	(1<<8)
#define PRX	(1<<7)
#define PRR	(1<<6)

#define DIRTY	(_PAGE_DIRTY | _PAGE_ACCESSED)
#define YOUNG	(_PAGE_ACCESSED)

/* Sized as 8 rather than 4 to allow checking the PTE's PRU bit against whether
   the fault happened in user mode or privileged mode. */
static struct expevt_lookup expevt_lookup_table = {
	.protection_flags = {PRX, PRX, 0, 0, PRR, PRR, PRW, PRW},
	.is_text_access   = {1,   1,   0, 0, 0,   0,   0,   0}
};
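/*
 * Note that .is_write_access is left zero-initialised here, and that with
 * the EXPEVT hashing used in do_fast_page_fault() below, ITLBMISS selects
 * entry 1 (PRX), RTLBMISS entry 4 (PRR) and WTLBMISS entry 6 (PRW).
 */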

/*
   This routine handles page faults that can be serviced just by refilling a
   TLB entry from an existing page table entry. (This case represents a very
   large majority of page faults.) Return 1 if the fault was successfully
   handled. Return 0 if the fault could not be handled. (This leads into the
   general fault handling in fault.c which deals with mapping file-backed
   pages, stack growth, segmentation faults, swapping etc etc)
 */
asmlinkage int do_fast_page_fault(unsigned long long ssr_md,
				  unsigned long long expevt,
				  unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	unsigned long long textaccess;
	unsigned long long protection_flags;
	unsigned long long index;
	unsigned long long expevt4;

	/* The next few lines implement a way of hashing EXPEVT into a
	 * small array index which can be used to lookup parameters
	 * specific to the type of TLBMISS being handled.
	 *
	 * Note:
	 *	ITLBMISS has EXPEVT==0xa40
	 *	RTLBMISS has EXPEVT==0x040
	 *	WTLBMISS has EXPEVT==0x060
	 */
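	/*
	 * Worked example of the hash below, using the EXPEVT values from
	 * the note above: for ITLBMISS, expevt4 = 0xa40 >> 4 = 0xa4, and
	 * 0xa4 ^ (0xa4 >> 5) = 0xa4 ^ 0x5 = 0xa1, which masks to index 1;
	 * RTLBMISS gives 0x4 ^ 0x0 = index 4; WTLBMISS gives index 6.
	 * These select PRX, PRR and PRW respectively in the lookup table.
	 */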
	expevt4 = (expevt >> 4);
	/* TODO : xor ssr_md into this expression too. Then we can check
	 * that PRU is set when it needs to be. */
	index = expevt4 ^ (expevt4 >> 5);
	index &= 7;
	protection_flags = expevt_lookup_table.protection_flags[index];
	textaccess = expevt_lookup_table.is_text_access[index];

	/* SIM
	 * Note this is now called with interrupts still disabled
	 * This is to cope with being called for a missing IO port
	 * address with interrupts disabled. This should be fixed as
	 * soon as we have a better 'fast path' miss handler.
	 *
	 * Plus take care how you try and debug this stuff.
	 * For example, writing debug data to a port which you
	 * have just faulted on is not going to work.
	 */

	tsk = current;
	mm = tsk->mm;

	if ((address >= VMALLOC_START && address < VMALLOC_END) ||
	    (address >= IOBASE_VADDR && address < IOBASE_END)) {
		if (ssr_md)
			/*
			 * Process-contexts can never have this address
			 * range mapped
			 */
			if (handle_vmalloc_fault(mm, protection_flags,
						 textaccess, address))
				return 1;
	} else if (!in_interrupt() && mm) {
		if (handle_tlbmiss(mm, protection_flags, textaccess, address))
			return 1;
	}

	return 0;
}