aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86_64
diff options
context:
space:
mode:
authorAndi Kleen <ak@suse.de>2006-01-11 16:44:09 -0500
committerLinus Torvalds <torvalds@g5.osdl.org>2006-01-11 22:04:53 -0500
commit66c581569e2cb41231b3fcd91a6c9f853d4d4e25 (patch)
treeb7e4629b9eeee9850e2ba4be32c2ee3c0afb6959 /arch/x86_64
parentbf2fcc6fdfe4f4e92bb74f062c0a1be189f3a561 (diff)
[PATCH] x86_64: Convert page fault error codes to symbolic constants.
Much better to deal with these than with the magic numbers. And remove the comment describing the bits - kernel source is no replacement for an architecture manual. Signed-off-by: Andi Kleen <ak@suse.de> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/x86_64')
-rw-r--r-- arch/x86_64/mm/fault.c | 34
1 file changed, 17 insertions(+), 17 deletions(-)
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index 2a9c836133a9..26eac194064b 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -35,6 +35,13 @@
35#include <asm-generic/sections.h> 35#include <asm-generic/sections.h>
36#include <asm/kdebug.h> 36#include <asm/kdebug.h>
37 37
38/* Page fault error code bits */
39#define PF_PROT (1<<0) /* or no page found */
40#define PF_WRITE (1<<1)
41#define PF_USER (1<<2)
42#define PF_RSVD (1<<3)
43#define PF_INSTR (1<<4)
44
38void bust_spinlocks(int yes) 45void bust_spinlocks(int yes)
39{ 46{
40 int loglevel_save = console_loglevel; 47 int loglevel_save = console_loglevel;
@@ -68,7 +75,7 @@ static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
68 unsigned char *max_instr; 75 unsigned char *max_instr;
69 76
70 /* If it was a exec fault ignore */ 77 /* If it was a exec fault ignore */
71 if (error_code & (1<<4)) 78 if (error_code & PF_INSTR)
72 return 0; 79 return 0;
73 80
74 instr = (unsigned char *)convert_rip_to_linear(current, regs); 81 instr = (unsigned char *)convert_rip_to_linear(current, regs);
@@ -293,13 +300,6 @@ int exception_trace = 1;
293 * This routine handles page faults. It determines the address, 300 * This routine handles page faults. It determines the address,
294 * and the problem, and then passes it off to one of the appropriate 301 * and the problem, and then passes it off to one of the appropriate
295 * routines. 302 * routines.
296 *
297 * error_code:
298 * bit 0 == 0 means no page found, 1 means protection fault
299 * bit 1 == 0 means read, 1 means write
300 * bit 2 == 0 means kernel, 1 means user-mode
301 * bit 3 == 1 means use of reserved bit detected
302 * bit 4 == 1 means fault was an instruction fetch
303 */ 303 */
304asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, 304asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
305 unsigned long error_code) 305 unsigned long error_code)
@@ -350,7 +350,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
350 * is always initialized because it's shared with the main 350 * is always initialized because it's shared with the main
351 * kernel text. Only vmalloc may need PML4 syncups. 351 * kernel text. Only vmalloc may need PML4 syncups.
352 */ 352 */
353 if (!(error_code & 0xd) && 353 if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
354 ((address >= VMALLOC_START && address < VMALLOC_END))) { 354 ((address >= VMALLOC_START && address < VMALLOC_END))) {
355 if (vmalloc_fault(address) < 0) 355 if (vmalloc_fault(address) < 0)
356 goto bad_area_nosemaphore; 356 goto bad_area_nosemaphore;
@@ -363,7 +363,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
363 goto bad_area_nosemaphore; 363 goto bad_area_nosemaphore;
364 } 364 }
365 365
366 if (unlikely(error_code & (1 << 3))) 366 if (unlikely(error_code & PF_RSVD))
367 pgtable_bad(address, regs, error_code); 367 pgtable_bad(address, regs, error_code);
368 368
369 /* 369 /*
@@ -390,7 +390,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
390 * thus avoiding the deadlock. 390 * thus avoiding the deadlock.
391 */ 391 */
392 if (!down_read_trylock(&mm->mmap_sem)) { 392 if (!down_read_trylock(&mm->mmap_sem)) {
393 if ((error_code & 4) == 0 && 393 if ((error_code & PF_USER) == 0 &&
394 !search_exception_tables(regs->rip)) 394 !search_exception_tables(regs->rip))
395 goto bad_area_nosemaphore; 395 goto bad_area_nosemaphore;
396 down_read(&mm->mmap_sem); 396 down_read(&mm->mmap_sem);
@@ -417,17 +417,17 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
417good_area: 417good_area:
418 info.si_code = SEGV_ACCERR; 418 info.si_code = SEGV_ACCERR;
419 write = 0; 419 write = 0;
420 switch (error_code & 3) { 420 switch (error_code & (PF_PROT|PF_WRITE)) {
421 default: /* 3: write, present */ 421 default: /* 3: write, present */
422 /* fall through */ 422 /* fall through */
423 case 2: /* write, not present */ 423 case PF_WRITE: /* write, not present */
424 if (!(vma->vm_flags & VM_WRITE)) 424 if (!(vma->vm_flags & VM_WRITE))
425 goto bad_area; 425 goto bad_area;
426 write++; 426 write++;
427 break; 427 break;
428 case 1: /* read, present */ 428 case PF_PROT: /* read, present */
429 goto bad_area; 429 goto bad_area;
430 case 0: /* read, not present */ 430 case 0: /* read, not present */
431 if (!(vma->vm_flags & (VM_READ | VM_EXEC))) 431 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
432 goto bad_area; 432 goto bad_area;
433 } 433 }
@@ -462,7 +462,7 @@ bad_area:
462 462
463bad_area_nosemaphore: 463bad_area_nosemaphore:
464 /* User mode accesses just cause a SIGSEGV */ 464 /* User mode accesses just cause a SIGSEGV */
465 if (error_code & 4) { 465 if (error_code & PF_USER) {
466 if (is_prefetch(regs, address, error_code)) 466 if (is_prefetch(regs, address, error_code))
467 return; 467 return;
468 468
@@ -558,7 +558,7 @@ do_sigbus:
558 up_read(&mm->mmap_sem); 558 up_read(&mm->mmap_sem);
559 559
560 /* Kernel mode? Handle exceptions or die */ 560 /* Kernel mode? Handle exceptions or die */
561 if (!(error_code & 4)) 561 if (!(error_code & PF_USER))
562 goto no_context; 562 goto no_context;
563 563
564 tsk->thread.cr2 = address; 564 tsk->thread.cr2 = address;