Diffstat (limited to 'arch/i386/kernel/traps.c')
-rw-r--r--  arch/i386/kernel/traps.c  |  32 +++++++++++++++++++++-----------
1 file changed, 21 insertions(+), 11 deletions(-)
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index af0d3f70a81..f21b41e7770 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -476,8 +476,6 @@ static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,
 			      siginfo_t *info)
 {
 	struct task_struct *tsk = current;
-	tsk->thread.error_code = error_code;
-	tsk->thread.trap_no = trapnr;
 
 	if (regs->eflags & VM_MASK) {
 		if (vm86)
@@ -489,6 +487,18 @@ static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,
 		goto kernel_trap;
 
 	trap_signal: {
+		/*
+		 * We want error_code and trap_no set for userspace faults and
+		 * kernelspace faults which result in die(), but not
+		 * kernelspace faults which are fixed up.  die() gives the
+		 * process no chance to handle the signal and notice the
+		 * kernel fault information, so that won't result in polluting
+		 * the information about previously queued, but not yet
+		 * delivered, faults.  See also do_general_protection below.
+		 */
+		tsk->thread.error_code = error_code;
+		tsk->thread.trap_no = trapnr;
+
 		if (info)
 			force_sig_info(signr, info, tsk);
 		else
@@ -497,8 +507,11 @@ static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,
 	}
 
 	kernel_trap: {
-		if (!fixup_exception(regs))
+		if (!fixup_exception(regs)) {
+			tsk->thread.error_code = error_code;
+			tsk->thread.trap_no = trapnr;
 			die(str, regs, error_code);
+		}
 		return;
 	}
 
@@ -583,7 +596,7 @@ fastcall void __kprobes do_general_protection(struct pt_regs * regs,
 	 * and we set the offset field correctly. Then we let the CPU to
 	 * restart the faulting instruction.
 	 */
-	if (tss->io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
+	if (tss->x86_tss.io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
 	    thread->io_bitmap_ptr) {
 		memcpy(tss->io_bitmap, thread->io_bitmap_ptr,
 		       thread->io_bitmap_max);
@@ -596,16 +609,13 @@ fastcall void __kprobes do_general_protection(struct pt_regs * regs,
 			       thread->io_bitmap_max, 0xff,
 			       tss->io_bitmap_max - thread->io_bitmap_max);
 		tss->io_bitmap_max = thread->io_bitmap_max;
-		tss->io_bitmap_base = IO_BITMAP_OFFSET;
+		tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
 		tss->io_bitmap_owner = thread;
 		put_cpu();
 		return;
 	}
 	put_cpu();
 
-	current->thread.error_code = error_code;
-	current->thread.trap_no = 13;
-
 	if (regs->eflags & VM_MASK)
 		goto gp_in_vm86;
 
@@ -624,6 +634,8 @@ gp_in_vm86:
 
 gp_in_kernel:
 	if (!fixup_exception(regs)) {
+		current->thread.error_code = error_code;
+		current->thread.trap_no = 13;
 		if (notify_die(DIE_GPF, "general protection fault", regs,
 				error_code, 13, SIGSEGV) == NOTIFY_STOP)
 			return;
@@ -1018,9 +1030,7 @@ fastcall void do_spurious_interrupt_bug(struct pt_regs * regs,
 fastcall unsigned long patch_espfix_desc(unsigned long uesp,
 					  unsigned long kesp)
 {
-	int cpu = smp_processor_id();
-	struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
-	struct desc_struct *gdt = (struct desc_struct *)cpu_gdt_descr->address;
+	struct desc_struct *gdt = __get_cpu_var(gdt_page).gdt;
 	unsigned long base = (kesp - uesp) & -THREAD_SIZE;
 	unsigned long new_kesp = kesp - base;
 	unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT;
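
For orientation, below is a minimal sketch of how do_trap()'s fault bookkeeping behaves once this patch is applied. It assumes the usual i386 helpers (user_mode(), fixup_exception(), force_sig_info(), die()) and omits the vm86 path; it illustrates the patch's intent and is not the kernel source verbatim.

/* Illustrative only: a condensed view of do_trap() after this change.
 * error_code/trap_no are recorded solely on the paths that actually
 * report the fault (the userspace signal path and the unhandled kernel
 * path ending in die()), so faults fixed up via fixup_exception() no
 * longer overwrite information about queued-but-undelivered signals.
 * user_mode()/die()/force_sig_info() are the existing i386 helpers; the
 * vm86 handling in the real function is left out for brevity.
 */
static void do_trap_sketch(int trapnr, int signr, char *str, long error_code,
			   struct pt_regs *regs, siginfo_t *info)
{
	struct task_struct *tsk = current;

	if (user_mode(regs)) {
		/* userspace fault: record it, then deliver the signal */
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = trapnr;
		if (info)
			force_sig_info(signr, info, tsk);
		else
			force_sig(signr, tsk);
		return;
	}

	/* kernel fault: record it only if no exception fixup applies */
	if (!fixup_exception(regs)) {
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = trapnr;
		die(str, regs, error_code);
	}
}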