author		Denys Vlasenko <dvlasenk@redhat.com>	2015-01-08 11:25:15 -0500
committer	Andy Lutomirski <luto@amacapital.net>	2015-01-13 17:18:08 -0500
commit		f6f64681d9d87ded48a90b644b2991c6ee05da2d
tree		de0efb82bbfdb94362e4c639a12a7fbf9f75e015
parent		6c3176a21652e506ca7efb8fa37a651a3c513bb5
x86: entry_64.S: fold SAVE_ARGS_IRQ macro into its sole user
No code changes.
This is a preparatory patch for a change in "struct pt_regs" handling.
CC: Linus Torvalds <torvalds@linux-foundation.org>
CC: Oleg Nesterov <oleg@redhat.com>
CC: "H. Peter Anvin" <hpa@zytor.com>
CC: Andy Lutomirski <luto@amacapital.net>
CC: Frederic Weisbecker <fweisbec@gmail.com>
CC: X86 ML <x86@kernel.org>
CC: Alexei Starovoitov <ast@plumgrid.com>
CC: Will Drewry <wad@chromium.org>
CC: Kees Cook <keescook@chromium.org>
CC: linux-kernel@vger.kernel.org
Signed-off-by: Denys Vlasenko <dvlasenk@redhat.com>
Signed-off-by: Andy Lutomirski <luto@amacapital.net>
-rw-r--r--	arch/x86/kernel/entry_64.S | 88
1 file changed, 42 insertions, 46 deletions
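The fold itself is purely mechanical: the single SAVE_ARGS_IRQ invocation is replaced by the macro body and the .macro/.endm definition is deleted, so the assembled output is unchanged. A minimal standalone sketch of the same transformation follows; the names (SAVE_ONE_ARG, entry_before, entry_after) are hypothetical illustrations, not anything in the kernel tree.

/* fold_sketch.S - hypothetical example; assembles with "gcc -c fold_sketch.S" */
	.text

/* Before the fold: a GAS macro with exactly one user. */
	.macro SAVE_ONE_ARG
	movq	%rdi, -8(%rsp)		/* spill the first argument */
	.endm

entry_before:
	SAVE_ONE_ARG			/* sole invocation */
	ret

/* After the fold: the definition is gone and the body sits directly at
 * the former call site - same bytes, one less indirection to read. */
entry_after:
	movq	%rdi, -8(%rsp)
	ret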
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 5ed4773e89f9..7d59df23e5bb 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -217,51 +217,6 @@ ENDPROC(native_usergs_sysret64)
 	CFI_REL_OFFSET r15, R15+\offset
 	.endm
 
-/* save partial stack frame */
-	.macro SAVE_ARGS_IRQ
-	cld
-	/* start from rbp in pt_regs and jump over */
-	movq_cfi rdi, (RDI-RBP)
-	movq_cfi rsi, (RSI-RBP)
-	movq_cfi rdx, (RDX-RBP)
-	movq_cfi rcx, (RCX-RBP)
-	movq_cfi rax, (RAX-RBP)
-	movq_cfi r8, (R8-RBP)
-	movq_cfi r9, (R9-RBP)
-	movq_cfi r10, (R10-RBP)
-	movq_cfi r11, (R11-RBP)
-
-	/* Save rbp so that we can unwind from get_irq_regs() */
-	movq_cfi rbp, 0
-
-	/* Save previous stack value */
-	movq %rsp, %rsi
-
-	leaq -RBP(%rsp),%rdi	/* arg1 for handler */
-	testl $3, CS-RBP(%rsi)
-	je 1f
-	SWAPGS
-	/*
-	 * irq_count is used to check if a CPU is already on an interrupt stack
-	 * or not. While this is essentially redundant with preempt_count it is
-	 * a little cheaper to use a separate counter in the PDA (short of
-	 * moving irq_enter into assembly, which would be too much work)
-	 */
-1:	incl PER_CPU_VAR(irq_count)
-	cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
-	CFI_DEF_CFA_REGISTER	rsi
-
-	/* Store previous stack value */
-	pushq %rsi
-	CFI_ESCAPE	0x0f /* DW_CFA_def_cfa_expression */, 6, \
-			0x77 /* DW_OP_breg7 */, 0, \
-			0x06 /* DW_OP_deref */, \
-			0x08 /* DW_OP_const1u */, SS+8-RBP, \
-			0x22 /* DW_OP_plus */
-	/* We entered an interrupt context - irqs are off: */
-	TRACE_IRQS_OFF
-	.endm
-
 ENTRY(save_paranoid)
 	XCPT_FRAME 1 RDI+8
 	cld
@@ -745,7 +700,48 @@ END(interrupt)
 	/* reserve pt_regs for scratch regs and rbp */
 	subq $ORIG_RAX-RBP, %rsp
 	CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
-	SAVE_ARGS_IRQ
+	cld
+	/* start from rbp in pt_regs and jump over */
+	movq_cfi rdi, (RDI-RBP)
+	movq_cfi rsi, (RSI-RBP)
+	movq_cfi rdx, (RDX-RBP)
+	movq_cfi rcx, (RCX-RBP)
+	movq_cfi rax, (RAX-RBP)
+	movq_cfi r8, (R8-RBP)
+	movq_cfi r9, (R9-RBP)
+	movq_cfi r10, (R10-RBP)
+	movq_cfi r11, (R11-RBP)
+
+	/* Save rbp so that we can unwind from get_irq_regs() */
+	movq_cfi rbp, 0
+
+	/* Save previous stack value */
+	movq %rsp, %rsi
+
+	leaq -RBP(%rsp),%rdi	/* arg1 for handler */
+	testl $3, CS-RBP(%rsi)
+	je 1f
+	SWAPGS
+	/*
+	 * irq_count is used to check if a CPU is already on an interrupt stack
+	 * or not. While this is essentially redundant with preempt_count it is
+	 * a little cheaper to use a separate counter in the PDA (short of
+	 * moving irq_enter into assembly, which would be too much work)
+	 */
+1:	incl PER_CPU_VAR(irq_count)
+	cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
+	CFI_DEF_CFA_REGISTER	rsi
+
+	/* Store previous stack value */
+	pushq %rsi
+	CFI_ESCAPE	0x0f /* DW_CFA_def_cfa_expression */, 6, \
+			0x77 /* DW_OP_breg7 */, 0, \
+			0x06 /* DW_OP_deref */, \
+			0x08 /* DW_OP_const1u */, SS+8-RBP, \
+			0x22 /* DW_OP_plus */
+	/* We entered an interrupt context - irqs are off: */
+	TRACE_IRQS_OFF
+
 	call \func
 	.endm
 
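Two details in the folded body deserve a gloss (my reading of the code, not text from the commit). First, the per-CPU irq_count sits at -1 while no interrupt is being serviced, so the incl sets ZF only on the outermost entry and the cmovzq then switches %rsp to the per-CPU IRQ stack exactly once; nested interrupts keep running on the stack they arrived on. Second, after pushq %rsi the only way an unwinder can reach the previous frame is through the saved stack pointer now at the top of the IRQ stack, which is what the CFI_ESCAPE bytes describe. Assuming standard DWARF opcode numbering, they read as:

/* DW_CFA_def_cfa_expression (0x0f) carrying a 6-byte expression:
 *   0x77 0x00        DW_OP_breg7 0 -> push %rsp + 0 (DWARF reg 7 = rsp)
 *   0x06             DW_OP_deref   -> load the saved previous stack pointer
 *   0x08 <SS+8-RBP>  DW_OP_const1u -> push the offset within the old frame
 *   0x22             DW_OP_plus    -> CFA = *(%rsp) + (SS+8-RBP)
 */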
