Diffstat (limited to 'arch/cris/mm/fault.c')
-rw-r--r--  arch/cris/mm/fault.c  101
1 file changed, 86 insertions, 15 deletions
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index 03254b9eded1..934c51078cce 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -6,6 +6,38 @@
  * Authors: Bjorn Wesen
  *
  * $Log: fault.c,v $
+ * Revision 1.20 2005/03/04 08:16:18 starvik
+ * Merge of Linux 2.6.11.
+ *
+ * Revision 1.19 2005/01/14 10:07:59 starvik
+ * Fixed warning.
+ *
+ * Revision 1.18 2005/01/12 08:10:14 starvik
+ * Readded the change of frametype when handling kernel page fault fixup
+ * for v10. This is necessary to avoid that the CPU remakes the faulting
+ * access.
+ *
+ * Revision 1.17 2005/01/11 13:53:05 starvik
+ * Use raw_printk.
+ *
+ * Revision 1.16 2004/12/17 11:39:41 starvik
+ * SMP support.
+ *
+ * Revision 1.15 2004/11/23 18:36:18 starvik
+ * Stack is now non-executable.
+ * Signal handler trampolines are placed in a reserved page mapped into all
+ * processes.
+ *
+ * Revision 1.14 2004/11/23 07:10:21 starvik
+ * Moved find_fixup_code to generic code.
+ *
+ * Revision 1.13 2004/11/23 07:00:54 starvik
+ * Actually use the execute permission bit in the MMU. This makes it possible
+ * to prevent e.g. attacks where executable code is put on the stack.
+ *
+ * Revision 1.12 2004/09/29 06:16:04 starvik
+ * Use instruction_pointer
+ *
  * Revision 1.11 2004/05/14 07:58:05 starvik
  * Merge of changes from 2.4
  *
@@ -103,6 +135,7 @@
 
 extern int find_fixup_code(struct pt_regs *);
 extern void die_if_kernel(const char *, struct pt_regs *, long);
+extern int raw_printk(const char *fmt, ...);
 
 /* debug of low-level TLB reload */
 #undef DEBUG
@@ -118,7 +151,8 @@ extern void die_if_kernel(const char *, struct pt_regs *, long);
 
 /* current active page directory */
 
-volatile pgd_t *current_pgd;
+volatile DEFINE_PER_CPU(pgd_t *,current_pgd);
+unsigned long cris_signal_return_page;
 
 /*
  * This routine handles page faults. It determines the address,
@@ -146,8 +180,9 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
 	struct vm_area_struct * vma;
 	siginfo_t info;
 
-	D(printk("Page fault for %X at %X, prot %d write %d\n",
-		 address, regs->erp, protection, writeaccess));
+	D(printk("Page fault for %lX on %X at %lX, prot %d write %d\n",
+		 address, smp_processor_id(), instruction_pointer(regs),
+		 protection, writeaccess));
 
 	tsk = current;
 
@@ -175,8 +210,19 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
 	    !user_mode(regs))
 		goto vmalloc_fault;
 
+	/* When stack execution is not allowed we store the signal
+	 * trampolines in the reserved cris_signal_return_page.
+	 * Handle this in the exact same way as vmalloc (we know
+	 * that the mapping is there and is valid so no need to
+	 * call handle_mm_fault).
+	 */
+	if (cris_signal_return_page &&
+	    address == cris_signal_return_page &&
+	    !protection && user_mode(regs))
+		goto vmalloc_fault;
+
 	/* we can and should enable interrupts at this point */
-	sti();
+	local_irq_enable();
 
 	mm = tsk->mm;
 	info.si_code = SEGV_MAPERR;
@@ -220,7 +266,10 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
 
 	/* first do some preliminary protection checks */
 
-	if (writeaccess) {
+	if (writeaccess == 2){
+		if (!(vma->vm_flags & VM_EXEC))
+			goto bad_area;
+	} else if (writeaccess == 1) {
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
 	} else {
@@ -234,14 +283,14 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
 	 * the fault.
 	 */
 
-	switch (handle_mm_fault(mm, vma, address, writeaccess)) {
-	case 1:
+	switch (handle_mm_fault(mm, vma, address, writeaccess & 1)) {
+	case VM_FAULT_MINOR:
 		tsk->min_flt++;
 		break;
-	case 2:
+	case VM_FAULT_MAJOR:
 		tsk->maj_flt++;
 		break;
-	case 0:
+	case VM_FAULT_SIGBUS:
 		goto do_sigbus;
 	default:
 		goto out_of_memory;
@@ -292,10 +341,10 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
 	 */
 
 	if ((unsigned long) (address) < PAGE_SIZE)
-		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
+		raw_printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
 	else
-		printk(KERN_ALERT "Unable to handle kernel access");
-	printk(" at virtual address %08lx\n",address);
+		raw_printk(KERN_ALERT "Unable to handle kernel access");
+	raw_printk(" at virtual address %08lx\n",address);
 
 	die_if_kernel("Oops", regs, (writeaccess << 1) | protection);
 
@@ -346,10 +395,11 @@ vmalloc_fault:
 
 		int offset = pgd_index(address);
 		pgd_t *pgd, *pgd_k;
+		pud_t *pud, *pud_k;
 		pmd_t *pmd, *pmd_k;
 		pte_t *pte_k;
 
-		pgd = (pgd_t *)current_pgd + offset;
+		pgd = (pgd_t *)per_cpu(current_pgd, smp_processor_id()) + offset;
 		pgd_k = init_mm.pgd + offset;
 
 		/* Since we're two-level, we don't need to do both
@@ -364,8 +414,13 @@ vmalloc_fault:
 		 * it exists.
 		 */
 
-		pmd = pmd_offset(pgd, address);
-		pmd_k = pmd_offset(pgd_k, address);
+		pud = pud_offset(pgd, address);
+		pud_k = pud_offset(pgd_k, address);
+		if (!pud_present(*pud_k))
+			goto no_context;
+
+		pmd = pmd_offset(pud, address);
+		pmd_k = pmd_offset(pud_k, address);
 
 		if (!pmd_present(*pmd_k))
 			goto bad_area_nosemaphore;
@@ -385,3 +440,19 @@ vmalloc_fault:
 		return;
 	}
 }
+
+/* Find fixup code. */
+int
+find_fixup_code(struct pt_regs *regs)
+{
+	const struct exception_table_entry *fixup;
+
+	if ((fixup = search_exception_tables(instruction_pointer(regs))) != 0) {
+		/* Adjust the instruction pointer in the stackframe. */
+		instruction_pointer(regs) = fixup->fixup;
+		arch_fixup(regs);
+		return 1;
+	}
+
+	return 0;
+}
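
A note on the protection check added above: the writeaccess argument acts as a three-way selector (2 for an instruction fetch, 1 for a write, anything else treated as a read), and only its low bit is forwarded to handle_mm_fault(). The sketch below is a standalone illustration of that dispatch, not kernel code; the VM_* bit values and the read-path check are assumptions made so it compiles and runs on its own (the read branch falls outside the hunk shown in the patch).

#include <stdio.h>

/* Stand-in vm_flags bits; the real values live in <linux/mm.h> and are
 * only mirrored here for illustration. */
#define VM_READ  0x1UL
#define VM_WRITE 0x2UL
#define VM_EXEC  0x4UL

/* Mirrors the dispatch in do_page_fault() above:
 *   writeaccess == 2 -> instruction fetch, requires VM_EXEC
 *   writeaccess == 1 -> write, requires VM_WRITE
 *   otherwise        -> read; this check is an assumption, since the
 *                       read branch is not visible in the patch hunk. */
static int vma_allows_access(unsigned long vm_flags, int writeaccess)
{
	if (writeaccess == 2)
		return (vm_flags & VM_EXEC) != 0;
	else if (writeaccess == 1)
		return (vm_flags & VM_WRITE) != 0;
	else
		return (vm_flags & (VM_READ | VM_EXEC)) != 0;
}

int main(void)
{
	/* An instruction fetch from a read/write-only mapping (e.g. the
	 * stack after the non-executable-stack change) is rejected. */
	printf("exec on rw-only vma: %s\n",
	       vma_allows_access(VM_READ | VM_WRITE, 2) ? "ok" : "refused");
	/* A write to a writable mapping is still permitted. */
	printf("write on rw vma:     %s\n",
	       vma_allows_access(VM_READ | VM_WRITE, 1) ? "ok" : "refused");
	return 0;
}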