author     Jan Beulich <JBeulich@suse.com>          2012-02-24 09:54:37 -0500
committer  Steven Rostedt <rostedt@goodmis.org>     2012-02-24 14:05:14 -0500
commit     626109130267713cac020515504ec341e47c96f9 (patch)
tree       b399c02e5c46536b3feef94df0ceb14ddf50c03a /arch/x86/kernel/entry_64.S
parent     a38449ef596b345e13a8f9b7d5cd9fedb8fcf921 (diff)
x86-64: Fix CFI annotations for NMI nesting code
The saving and restoring of %rdx was not annotated at all, and the
jumps over sections where the CFI state gets only partly restored were
not handled either.
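
For illustration, here is a minimal standalone sketch of the annotation
pattern the patch applies (cfi_sketch is a made-up function, %rdi is only a
dummy condition, and the raw .cfi_* directives are roughly what the kernel's
pushq_cfi, CFI_REL_OFFSET, CFI_RESTORE, CFI_REMEMBER_STATE and
CFI_RESTORE_STATE macros expand to when CFI generation is enabled): annotate
the save slot right after the push, drop the annotation after the pop, and
bracket the branch that skips the partial restore with remember/restore-state.

        /*
         * Sketch only, not kernel code: cfi_sketch is a made-up function.
         */
        .text
        .globl  cfi_sketch
        .type   cfi_sketch, @function
cfi_sketch:
        .cfi_startproc
        pushq   %rdx
        .cfi_adjust_cfa_offset 8        /* the frame grew by one slot */
        .cfi_rel_offset %rdx, 0         /* saved %rdx now sits at 0(%rsp) */

        testq   %rdi, %rdi
        jz      1f                      /* branch over the partial restore */
        .cfi_remember_state             /* snapshot: %rdx still on the stack */

        popq    %rdx
        .cfi_adjust_cfa_offset -8
        .cfi_restore %rdx               /* %rdx is back in its register */
        ret

        .cfi_restore_state              /* the branch target below still has
                                           %rdx saved, so reinstate that view */
1:
        popq    %rdx
        .cfi_adjust_cfa_offset -8
        .cfi_restore %rdx
        ret
        .cfi_endproc
        .size   cfi_sketch, .-cfi_sketch

An unwinder stopping at the branch target then still knows where %rdx lives,
which is the same effect the added CFI_REMEMBER_STATE after test_in_nmi and
the CFI_RESTORE_STATE before first_nmi have in the patch below.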
Further, by folding the frame pushes done in repeat_nmi into the code
that so far immediately preceded restart_nmi (after moving the restore
of %rdx ahead of them, since it no longer gets used when pushing the
prior frames), the annotations of the replicated frame creations can be
made consistent too.
v2: Fully fold repeat_nmi into the normal code flow (adding a single
redundant instruction to the "normal" code path), thus retaining
the special protection of all instructions between repeat_nmi and
end_repeat_nmi.
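
As background for the replicated frame creations mentioned above, a minimal
standalone sketch of the constant-offset push pattern both .rept 5 copies in
the diff rely on (dup_top3 and its 3-word block are illustrative, not taken
from the patch): each pushq lowers %rsp by one slot, so a fixed displacement
walks through the source block from its bottom word to its top word and
reproduces it, in order, on the new stack top.

        /*
         * Sketch only: dup_top3 and its 3-word block are illustrative; the
         * patch does the same thing with the 5-word iret frame, using the
         * displacements 6*8 and 4*8 shown in the diff.
         */
        .text
        .globl  dup_top3
        .type   dup_top3, @function
dup_top3:
        .cfi_startproc
        pushq   $3                      /* source block, bottom word first */
        .cfi_adjust_cfa_offset 8
        pushq   $2
        .cfi_adjust_cfa_offset 8
        pushq   $1                      /* ...so 1 is the top word */
        .cfi_adjust_cfa_offset 8

        .rept 3                         /* duplicate the 3-word block: the */
        pushq   2*8(%rsp)               /* fixed 2*8 displacement reads 3, */
        .cfi_adjust_cfa_offset 8        /* then 2, then 1 as %rsp descends */
        .endr

        movq    (%rsp), %rax            /* top of the copy == original top (1) */
        addq    $(6*8), %rsp            /* drop both blocks */
        .cfi_adjust_cfa_offset -48
        ret
        .cfi_endproc
        .size   dup_top3, .-dup_top3

In the diff, the first copy reads at 6*8(%rsp) because the 5-word iret frame
starts two slots down, below the NMI-executing flag and the saved %rdx, i.e.
(2+5-1)*8; the second copy reads its just-made predecessor at the top of the
stack, i.e. (0+5-1)*8 = 4*8.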
Link: http://lkml.kernel.org/r/4F478B630200007800074A31@nat28.tlf.novell.com
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'arch/x86/kernel/entry_64.S')
 arch/x86/kernel/entry_64.S | 52 ++++++++++++++++++++++++++++------------------------
 1 file changed, 28 insertions(+), 24 deletions(-)
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 1333d9851778..e0eca007dc0d 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1530,6 +1530,7 @@ ENTRY(nmi)
 
         /* Use %rdx as out temp variable throughout */
         pushq_cfi %rdx
+        CFI_REL_OFFSET rdx, 0
 
         /*
          * If %cs was not the kernel segment, then the NMI triggered in user
@@ -1554,6 +1555,7 @@ ENTRY(nmi)
          */
         lea 6*8(%rsp), %rdx
         test_in_nmi rdx, 4*8(%rsp), nested_nmi, first_nmi
+        CFI_REMEMBER_STATE
 
 nested_nmi:
         /*
@@ -1585,10 +1587,12 @@ nested_nmi:
 
 nested_nmi_out:
         popq_cfi %rdx
+        CFI_RESTORE rdx
 
         /* No need to check faults here */
         INTERRUPT_RETURN
 
+        CFI_RESTORE_STATE
 first_nmi:
         /*
          * Because nested NMIs will use the pushed location that we
@@ -1624,6 +1628,10 @@ first_nmi:
          * NMI may zero out. The original stack frame and the temp storage
          * is also used by nested NMIs and can not be trusted on exit.
          */
+        /* Do not pop rdx, nested NMIs will corrupt it */
+        movq (%rsp), %rdx
+        CFI_RESTORE rdx
+
         /* Set the NMI executing variable on the stack. */
         pushq_cfi $1
 
@@ -1631,14 +1639,31 @@ first_nmi:
         .rept 5
         pushq_cfi 6*8(%rsp)
         .endr
+        CFI_DEF_CFA_OFFSET SS+8-RIP
+
+        /*
+         * If there was a nested NMI, the first NMI's iret will return
+         * here. But NMIs are still enabled and we can take another
+         * nested NMI. The nested NMI checks the interrupted RIP to see
+         * if it is between repeat_nmi and end_repeat_nmi, and if so
+         * it will just return, as we are about to repeat an NMI anyway.
+         * This makes it safe to copy to the stack frame that a nested
+         * NMI will update.
+         */
+repeat_nmi:
+        /*
+         * Update the stack variable to say we are still in NMI (the update
+         * is benign for the non-repeat case, where 1 was pushed just above
+         * to this very stack slot).
+         */
+        movq $1, 5*8(%rsp)
 
         /* Make another copy, this one may be modified by nested NMIs */
         .rept 5
         pushq_cfi 4*8(%rsp)
         .endr
-
-        /* Do not pop rdx, nested NMIs will corrupt it */
-        movq 11*8(%rsp), %rdx
+        CFI_DEF_CFA_OFFSET SS+8-RIP
+end_repeat_nmi:
 
         /*
          * Everything below this point can be preempted by a nested
@@ -1646,7 +1671,6 @@ first_nmi:
          * caused by an exception and nested NMI will start here, and
          * can still be preempted by another NMI.
          */
-restart_nmi:
         pushq_cfi $-1           /* ORIG_RAX: no syscall to restart */
         subq $ORIG_RAX-R15, %rsp
         CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
@@ -1675,26 +1699,6 @@ nmi_restore:
         CFI_ENDPROC
 END(nmi)
 
-        /*
-         * If an NMI hit an iret because of an exception or breakpoint,
-         * it can lose its NMI context, and a nested NMI may come in.
-         * In that case, the nested NMI will change the preempted NMI's
-         * stack to jump to here when it does the final iret.
-         */
-repeat_nmi:
-        INTR_FRAME
-        /* Update the stack variable to say we are still in NMI */
-        movq $1, 5*8(%rsp)
-
-        /* copy the saved stack back to copy stack */
-        .rept 5
-        pushq_cfi 4*8(%rsp)
-        .endr
-
-        jmp restart_nmi
-        CFI_ENDPROC
-end_repeat_nmi:
-
 ENTRY(ignore_sysret)
         CFI_STARTPROC
         mov     $-ENOSYS,%eax