Diffstat (limited to 'arch/x86/kernel/entry_64.S')
 arch/x86/kernel/entry_64.S | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 7c27da407da7..c52b60919163 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -337,12 +337,12 @@ ENTRY(save_args)
 	je 1f
 	SWAPGS
 	/*
-	 * irqcount is used to check if a CPU is already on an interrupt stack
+	 * irq_count is used to check if a CPU is already on an interrupt stack
 	 * or not. While this is essentially redundant with preempt_count it is
 	 * a little cheaper to use a separate counter in the PDA (short of
 	 * moving irq_enter into assembly, which would be too much work)
 	 */
-1:	incl %gs:pda_irqcount
+1:	incl PER_CPU_VAR(irq_count)
 	jne 2f
 	popq_cfi %rax			/* move return address... */
 	mov PER_CPU_VAR(irq_stack_ptr),%rsp
@@ -837,7 +837,7 @@ common_interrupt:
 ret_from_intr:
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
-	decl %gs:pda_irqcount
+	decl PER_CPU_VAR(irq_count)
 	leaveq
 	CFI_DEF_CFA_REGISTER	rsp
 	CFI_ADJUST_CFA_OFFSET	-8
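The protocol behind these paired incl/decl sites: irq_count is per-CPU and starts at -1, so the increment on interrupt entry lands exactly on zero only for the outermost interrupt, which is the one case where %rsp gets pointed at the per-CPU IRQ stack; nested interrupts merely bump the depth. A toy C model of that accounting, assuming a single CPU for brevity (none of these names are kernel APIs):

#include <stdio.h>

/* Stand-in for the per-CPU counter; -1 means "not on the IRQ stack". */
static int irq_count = -1;

/* Mirrors "incl PER_CPU_VAR(irq_count); jne 2f" in save_args: the
 * stack switch happens only when the increment reaches exactly zero,
 * i.e. on the outermost interrupt. */
static void enter_irq(void)
{
	if (++irq_count == 0)
		printf("outermost: point %%rsp at irq_stack_ptr\n");
	else
		printf("nested (depth %d): already on the IRQ stack\n", irq_count);
}

/* Mirrors "decl PER_CPU_VAR(irq_count)" in ret_from_intr. */
static void exit_irq(void)
{
	--irq_count;
}

int main(void)
{
	enter_irq();	/* switches stacks */
	enter_irq();	/* nested: no switch */
	exit_irq();
	exit_irq();	/* back to -1: off the IRQ stack */
	return 0;
}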
@@ -1260,14 +1260,14 @@ ENTRY(call_softirq)
 	CFI_REL_OFFSET rbp,0
 	mov %rsp,%rbp
 	CFI_DEF_CFA_REGISTER rbp
-	incl %gs:pda_irqcount
+	incl PER_CPU_VAR(irq_count)
 	cmove PER_CPU_VAR(irq_stack_ptr),%rsp
 	push %rbp			# backlink for old unwinder
 	call __do_softirq
 	leaveq
 	CFI_DEF_CFA_REGISTER	rsp
 	CFI_ADJUST_CFA_OFFSET	-8
-	decl %gs:pda_irqcount
+	decl PER_CPU_VAR(irq_count)
 	ret
 	CFI_ENDPROC
 END(call_softirq)
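call_softirq uses the same counter branch-free: incl sets ZF exactly when irq_count goes from -1 to 0, and cmove then loads irq_stack_ptr into %rsp only in that outermost case (cmovzq in the Xen path below tests the same ZF condition). PER_CPU_VAR() itself resolves to a %gs-relative symbol reference; the following is a paraphrase of the assembly-side macro in asm/percpu.h of this vintage, not a verbatim quote:

/* Paraphrase of arch/x86/include/asm/percpu.h (__ASSEMBLY__ side): on
 * SMP the %gs segment base points at this CPU's per-CPU area, so a
 * segment-prefixed reference to the per_cpu__ symbol selects the local
 * copy; on UP a plain symbol reference is enough. */
#ifdef CONFIG_SMP
#define PER_CPU_VAR(var)	%__percpu_seg:per_cpu__##var	/* gs on x86-64 */
#else
#define PER_CPU_VAR(var)	per_cpu__##var
#endif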
@@ -1297,7 +1297,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
 	movq %rdi, %rsp			# we don't return, adjust the stack frame
 	CFI_ENDPROC
 	DEFAULT_FRAME
-11:	incl %gs:pda_irqcount
+11:	incl PER_CPU_VAR(irq_count)
 	movq %rsp,%rbp
 	CFI_DEF_CFA_REGISTER rbp
 	cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
@@ -1305,7 +1305,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
 	call xen_evtchn_do_upcall
 	popq %rsp
 	CFI_DEF_CFA_REGISTER rsp
-	decl %gs:pda_irqcount
+	decl PER_CPU_VAR(irq_count)
 	jmp error_exit
 	CFI_ENDPROC
 END(do_hypervisor_callback)
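Nothing in this file defines the new symbol; the hunks above only make the assembly consume it instead of the old PDA field. The C-side per-CPU definitions this conversion pairs with would look roughly like the sketch below; treat the exact initializers, and their home (arch/x86/kernel/cpu/common.c in kernels of this era), as assumptions rather than part of this diff:

/* Sketch of the C-side counterparts referenced by the assembly above. */
DEFINE_PER_CPU(unsigned int, irq_count) = -1;	/* replaces pda.irqcount */
DEFINE_PER_CPU(char *, irq_stack_ptr);		/* top of this CPU's IRQ stack */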