diff options
Diffstat (limited to 'arch/i386/kernel/entry.S')
| -rw-r--r-- | arch/i386/kernel/entry.S | 36 |
1 file changed, 33 insertions, 3 deletions
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index 787190c45fdb..d9a260f2efb4 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
| @@ -42,6 +42,7 @@ | |||
| 42 | 42 | ||
| 43 | #include <linux/linkage.h> | 43 | #include <linux/linkage.h> |
| 44 | #include <asm/thread_info.h> | 44 | #include <asm/thread_info.h> |
| 45 | #include <asm/irqflags.h> | ||
| 45 | #include <asm/errno.h> | 46 | #include <asm/errno.h> |
| 46 | #include <asm/segment.h> | 47 | #include <asm/segment.h> |
| 47 | #include <asm/smp.h> | 48 | #include <asm/smp.h> |
| @@ -76,12 +77,21 @@ NT_MASK = 0x00004000 | |||
| 76 | VM_MASK = 0x00020000 | 77 | VM_MASK = 0x00020000 |
| 77 | 78 | ||
| 78 | #ifdef CONFIG_PREEMPT | 79 | #ifdef CONFIG_PREEMPT |
| 79 | #define preempt_stop cli | 80 | #define preempt_stop cli; TRACE_IRQS_OFF |
| 80 | #else | 81 | #else |
| 81 | #define preempt_stop | 82 | #define preempt_stop |
| 82 | #define resume_kernel restore_nocheck | 83 | #define resume_kernel restore_nocheck |
| 83 | #endif | 84 | #endif |
| 84 | 85 | ||
| 86 | .macro TRACE_IRQS_IRET | ||
| 87 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
| 88 | testl $IF_MASK,EFLAGS(%esp) # interrupts off? | ||
| 89 | jz 1f | ||
| 90 | TRACE_IRQS_ON | ||
| 91 | 1: | ||
| 92 | #endif | ||
| 93 | .endm | ||
| 94 | |||
| 85 | #ifdef CONFIG_VM86 | 95 | #ifdef CONFIG_VM86 |
| 86 | #define resume_userspace_sig check_userspace | 96 | #define resume_userspace_sig check_userspace |
| 87 | #else | 97 | #else |
| @@ -257,6 +267,10 @@ ENTRY(sysenter_entry) | |||
| 257 | CFI_REGISTER esp, ebp | 267 | CFI_REGISTER esp, ebp |
| 258 | movl TSS_sysenter_esp0(%esp),%esp | 268 | movl TSS_sysenter_esp0(%esp),%esp |
| 259 | sysenter_past_esp: | 269 | sysenter_past_esp: |
| 270 | /* | ||
| 271 | * No need to follow this irqs on/off section: the syscall | ||
| 272 | * disabled irqs and here we enable it straight after entry: | ||
| 273 | */ | ||
| 260 | sti | 274 | sti |
| 261 | pushl $(__USER_DS) | 275 | pushl $(__USER_DS) |
| 262 | CFI_ADJUST_CFA_OFFSET 4 | 276 | CFI_ADJUST_CFA_OFFSET 4 |
| @@ -303,6 +317,7 @@ sysenter_past_esp: | |||
| 303 | call *sys_call_table(,%eax,4) | 317 | call *sys_call_table(,%eax,4) |
| 304 | movl %eax,EAX(%esp) | 318 | movl %eax,EAX(%esp) |
| 305 | cli | 319 | cli |
| 320 | TRACE_IRQS_OFF | ||
| 306 | movl TI_flags(%ebp), %ecx | 321 | movl TI_flags(%ebp), %ecx |
| 307 | testw $_TIF_ALLWORK_MASK, %cx | 322 | testw $_TIF_ALLWORK_MASK, %cx |
| 308 | jne syscall_exit_work | 323 | jne syscall_exit_work |
| @@ -310,6 +325,7 @@ sysenter_past_esp: | |||
| 310 | movl EIP(%esp), %edx | 325 | movl EIP(%esp), %edx |
| 311 | movl OLDESP(%esp), %ecx | 326 | movl OLDESP(%esp), %ecx |
| 312 | xorl %ebp,%ebp | 327 | xorl %ebp,%ebp |
| 328 | TRACE_IRQS_ON | ||
| 313 | sti | 329 | sti |
| 314 | sysexit | 330 | sysexit |
| 315 | CFI_ENDPROC | 331 | CFI_ENDPROC |
| @@ -339,6 +355,7 @@ syscall_exit: | |||
| 339 | cli # make sure we don't miss an interrupt | 355 | cli # make sure we don't miss an interrupt |
| 340 | # setting need_resched or sigpending | 356 | # setting need_resched or sigpending |
| 341 | # between sampling and the iret | 357 | # between sampling and the iret |
| 358 | TRACE_IRQS_OFF | ||
| 342 | movl TI_flags(%ebp), %ecx | 359 | movl TI_flags(%ebp), %ecx |
| 343 | testw $_TIF_ALLWORK_MASK, %cx # current->work | 360 | testw $_TIF_ALLWORK_MASK, %cx # current->work |
| 344 | jne syscall_exit_work | 361 | jne syscall_exit_work |
| @@ -355,12 +372,15 @@ restore_all: | |||
| 355 | CFI_REMEMBER_STATE | 372 | CFI_REMEMBER_STATE |
| 356 | je ldt_ss # returning to user-space with LDT SS | 373 | je ldt_ss # returning to user-space with LDT SS |
| 357 | restore_nocheck: | 374 | restore_nocheck: |
| 375 | TRACE_IRQS_IRET | ||
| 376 | restore_nocheck_notrace: | ||
| 358 | RESTORE_REGS | 377 | RESTORE_REGS |
| 359 | addl $4, %esp | 378 | addl $4, %esp |
| 360 | CFI_ADJUST_CFA_OFFSET -4 | 379 | CFI_ADJUST_CFA_OFFSET -4 |
| 361 | 1: iret | 380 | 1: iret |
| 362 | .section .fixup,"ax" | 381 | .section .fixup,"ax" |
| 363 | iret_exc: | 382 | iret_exc: |
| 383 | TRACE_IRQS_ON | ||
| 364 | sti | 384 | sti |
| 365 | pushl $0 # no error code | 385 | pushl $0 # no error code |
| 366 | pushl $do_iret_error | 386 | pushl $do_iret_error |
| @@ -386,11 +406,13 @@ ldt_ss: | |||
| 386 | subl $8, %esp # reserve space for switch16 pointer | 406 | subl $8, %esp # reserve space for switch16 pointer |
| 387 | CFI_ADJUST_CFA_OFFSET 8 | 407 | CFI_ADJUST_CFA_OFFSET 8 |
| 388 | cli | 408 | cli |
| 409 | TRACE_IRQS_OFF | ||
| 389 | movl %esp, %eax | 410 | movl %esp, %eax |
| 390 | /* Set up the 16bit stack frame with switch32 pointer on top, | 411 | /* Set up the 16bit stack frame with switch32 pointer on top, |
| 391 | * and a switch16 pointer on top of the current frame. */ | 412 | * and a switch16 pointer on top of the current frame. */ |
| 392 | call setup_x86_bogus_stack | 413 | call setup_x86_bogus_stack |
| 393 | CFI_ADJUST_CFA_OFFSET -8 # frame has moved | 414 | CFI_ADJUST_CFA_OFFSET -8 # frame has moved |
| 415 | TRACE_IRQS_IRET | ||
| 394 | RESTORE_REGS | 416 | RESTORE_REGS |
| 395 | lss 20+4(%esp), %esp # switch to 16bit stack | 417 | lss 20+4(%esp), %esp # switch to 16bit stack |
| 396 | 1: iret | 418 | 1: iret |
| @@ -411,6 +433,7 @@ work_resched: | |||
| 411 | cli # make sure we don't miss an interrupt | 433 | cli # make sure we don't miss an interrupt |
| 412 | # setting need_resched or sigpending | 434 | # setting need_resched or sigpending |
| 413 | # between sampling and the iret | 435 | # between sampling and the iret |
| 436 | TRACE_IRQS_OFF | ||
| 414 | movl TI_flags(%ebp), %ecx | 437 | movl TI_flags(%ebp), %ecx |
| 415 | andl $_TIF_WORK_MASK, %ecx # is there any work to be done other | 438 | andl $_TIF_WORK_MASK, %ecx # is there any work to be done other |
| 416 | # than syscall tracing? | 439 | # than syscall tracing? |
| @@ -462,6 +485,7 @@ syscall_trace_entry: | |||
| 462 | syscall_exit_work: | 485 | syscall_exit_work: |
| 463 | testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl | 486 | testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl |
| 464 | jz work_pending | 487 | jz work_pending |
| 488 | TRACE_IRQS_ON | ||
| 465 | sti # could let do_syscall_trace() call | 489 | sti # could let do_syscall_trace() call |
| 466 | # schedule() instead | 490 | # schedule() instead |
| 467 | movl %esp, %eax | 491 | movl %esp, %eax |
| @@ -535,9 +559,14 @@ ENTRY(irq_entries_start) | |||
| 535 | vector=vector+1 | 559 | vector=vector+1 |
| 536 | .endr | 560 | .endr |
| 537 | 561 | ||
| 562 | /* | ||
| 563 | * the CPU automatically disables interrupts when executing an IRQ vector, | ||
| 564 | * so IRQ-flags tracing has to follow that: | ||
| 565 | */ | ||
| 538 | ALIGN | 566 | ALIGN |
| 539 | common_interrupt: | 567 | common_interrupt: |
| 540 | SAVE_ALL | 568 | SAVE_ALL |
| 569 | TRACE_IRQS_OFF | ||
| 541 | movl %esp,%eax | 570 | movl %esp,%eax |
| 542 | call do_IRQ | 571 | call do_IRQ |
| 543 | jmp ret_from_intr | 572 | jmp ret_from_intr |
| @@ -549,9 +578,10 @@ ENTRY(name) \ | |||
| 549 | pushl $~(nr); \ | 578 | pushl $~(nr); \ |
| 550 | CFI_ADJUST_CFA_OFFSET 4; \ | 579 | CFI_ADJUST_CFA_OFFSET 4; \ |
| 551 | SAVE_ALL; \ | 580 | SAVE_ALL; \ |
| 581 | TRACE_IRQS_OFF \ | ||
| 552 | movl %esp,%eax; \ | 582 | movl %esp,%eax; \ |
| 553 | call smp_/**/name; \ | 583 | call smp_/**/name; \ |
| 554 | jmp ret_from_intr; \ | 584 | jmp ret_from_intr; \ |
| 555 | CFI_ENDPROC | 585 | CFI_ENDPROC |
| 556 | 586 | ||
| 557 | /* The include is where all of the SMP etc. interrupts come from */ | 587 | /* The include is where all of the SMP etc. interrupts come from */ |
| @@ -726,7 +756,7 @@ nmi_stack_correct: | |||
| 726 | xorl %edx,%edx # zero error code | 756 | xorl %edx,%edx # zero error code |
| 727 | movl %esp,%eax # pt_regs pointer | 757 | movl %esp,%eax # pt_regs pointer |
| 728 | call do_nmi | 758 | call do_nmi |
| 729 | jmp restore_all | 759 | jmp restore_nocheck_notrace |
| 730 | CFI_ENDPROC | 760 | CFI_ENDPROC |
| 731 | 761 | ||
| 732 | nmi_stack_fixup: | 762 | nmi_stack_fixup: |
