author    Andrew Morton <akpm@osdl.org>  2006-03-23 06:00:57 -0500
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-03-23 10:38:16 -0500
commit    dd287796d608fcdc3fe5e8fdb5bf762a8f1bc32a (patch)
tree      84be163fdc5fe36eb8d3f1aa5e60bfd1d794c641 /arch
parent    41c28ff1635e71af072c4711ff5fadd5855d48e7 (diff)
[PATCH] pause_on_oops command line option
Attempt to fix the problem wherein people's oops reports scroll off the screen due to repeated oopsing or to oopses on other CPUs.

If this happens the user can reboot with the `pause_on_oops=<seconds>' option. It will allow the first oopsing CPU to print an oops record just a single time. Second or subsequent oops attempts, or oopses on other CPUs, will cause those CPUs to enter a tight loop until the specified number of seconds have elapsed.

The patch implements the infrastructure generically, in the expectation that architectures other than x86 will find it useful.

Cc: Dave Jones <davej@codemonkey.org.uk>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
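The pause_on_oops bookkeeping itself is generic code outside arch/, so it does not appear in this diffstat; only the arch/i386 call sites do. The following is a rough sketch of what the changelog describes: the helper names oops_enter(), oops_exit() and oops_may_print() are the ones used in the diff below, while do_oops_enter_exit(), pause_on_oops_flag and the mdelay()-based countdown are illustrative and need not match kernel/panic.c exactly.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/delay.h>

static int pause_on_oops;	/* seconds, from pause_on_oops=; 0 disables the feature */
static int pause_on_oops_flag;	/* an oops report (or its pause window) is in progress */
static DEFINE_SPINLOCK(pause_on_oops_lock);

static int __init pause_on_oops_setup(char *str)
{
	pause_on_oops = simple_strtoul(str, NULL, 0);
	return 1;
}
__setup("pause_on_oops=", pause_on_oops_setup);

static void do_oops_enter_exit(void)
{
	unsigned long flags;
	static int spin_counter;

	if (!pause_on_oops)
		return;

	spin_lock_irqsave(&pause_on_oops_lock, flags);
	if (pause_on_oops_flag == 0) {
		/* First oops: mark it in progress, let this CPU print. */
		pause_on_oops_flag = 1;
	} else if (spin_counter == 0) {
		/* Second oops, or exit of the first: count the pause down. */
		spin_counter = pause_on_oops;
		do {
			spin_unlock(&pause_on_oops_lock);
			mdelay(1000);	/* one second per tick */
			spin_lock(&pause_on_oops_lock);
		} while (--spin_counter);
		pause_on_oops_flag = 0;
	} else {
		/* Another CPU is already counting; just wait it out. */
		while (spin_counter) {
			spin_unlock(&pause_on_oops_lock);
			mdelay(1);
			spin_lock(&pause_on_oops_lock);
		}
	}
	spin_unlock_irqrestore(&pause_on_oops_lock, flags);
}

void oops_enter(void)
{
	do_oops_enter_exit();
}

void oops_exit(void)
{
	do_oops_enter_exit();
}

int oops_may_print(void)
{
	/* Printing is allowed only while no earlier report or pause is active. */
	return pause_on_oops_flag == 0;
}

With something along these lines in place, booting with pause_on_oops=30 would hold other CPUs (and any repeat oops) quiet for roughly 30 seconds after the first report, long enough to read or photograph it.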
Diffstat (limited to 'arch')
-rw-r--r--  arch/i386/kernel/traps.c   3
-rw-r--r--  arch/i386/mm/fault.c      39
2 files changed, 26 insertions(+), 16 deletions(-)
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 1b7ad4115d81..de5386b01d38 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -352,6 +352,8 @@ void die(const char * str, struct pt_regs * regs, long err)
 	static int die_counter;
 	unsigned long flags;
 
+	oops_enter();
+
 	if (die.lock_owner != raw_smp_processor_id()) {
 		console_verbose();
 		spin_lock_irqsave(&die.lock, flags);
@@ -404,6 +406,7 @@ void die(const char * str, struct pt_regs * regs, long err)
 		ssleep(5);
 		panic("Fatal exception");
 	}
+	oops_exit();
 	do_exit(SIGSEGV);
 }
 
diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c
index 47a3b72ec7b6..7f0fcf219a26 100644
--- a/arch/i386/mm/fault.c
+++ b/arch/i386/mm/fault.c
@@ -509,24 +509,31 @@ no_context:
 
 	bust_spinlocks(1);
 
-#ifdef CONFIG_X86_PAE
-	if (error_code & 16) {
-		pte_t *pte = lookup_address(address);
-
-		if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
-			printk(KERN_CRIT "kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n", current->uid);
+	if (oops_may_print()) {
+#ifdef CONFIG_X86_PAE
+		if (error_code & 16) {
+			pte_t *pte = lookup_address(address);
+
+			if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
+				printk(KERN_CRIT "kernel tried to execute "
+					"NX-protected page - exploit attempt? "
+					"(uid: %d)\n", current->uid);
+		}
+#endif
+		if (address < PAGE_SIZE)
+			printk(KERN_ALERT "BUG: unable to handle kernel NULL "
+					"pointer dereference");
+		else
+			printk(KERN_ALERT "BUG: unable to handle kernel paging"
+					" request");
+		printk(" at virtual address %08lx\n",address);
+		printk(KERN_ALERT " printing eip:\n");
+		printk("%08lx\n", regs->eip);
 	}
-#endif
-	if (address < PAGE_SIZE)
-		printk(KERN_ALERT "BUG: unable to handle kernel NULL pointer dereference");
-	else
-		printk(KERN_ALERT "BUG: unable to handle kernel paging request");
-	printk(" at virtual address %08lx\n",address);
-	printk(KERN_ALERT " printing eip:\n");
-	printk("%08lx\n", regs->eip);
 	page = read_cr3();
 	page = ((unsigned long *) __va(page))[address >> 22];
-	printk(KERN_ALERT "*pde = %08lx\n", page);
+	if (oops_may_print())
+		printk(KERN_ALERT "*pde = %08lx\n", page);
 	/*
 	 * We must not directly access the pte in the highpte
 	 * case, the page table might be allocated in highmem.
@@ -534,7 +541,7 @@ no_context:
 	 * it's allocated already.
 	 */
 #ifndef CONFIG_HIGHPTE
-	if (page & 1) {
+	if ((page & 1) && oops_may_print()) {
 		page &= PAGE_MASK;
 		address &= 0x003ff000;
 		page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];