author	Brian Gerst <brgerst@gmail.com>	2009-01-18 10:38:58 -0500
committer	Tejun Heo <tj@kernel.org>	2009-01-18 10:38:58 -0500
commit	5689553076c4a67b83426b076082c63085b7567a (patch)
tree	982dac3bceff2dbdb7b962c24b068b5f6e72ccd1 /arch/x86/kernel/entry_64.S
parent	3d1e42a7cf945e289d6ba26159aa0e2b0645401b (diff)
x86-64: Move irqcount from PDA to per-cpu.
tj: s/irqcount/irq_count/

Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
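For context, the assembly hunks below only switch the accessor from a PDA field to a per-cpu variable; the C-side counterpart of the move lands outside entry_64.S and is not part of this diff. A minimal sketch of what that counterpart typically looks like is given here; the exact field comments, placement, and initializer are assumptions for illustration, not taken from this patch.

#include <linux/percpu.h>

/*
 * Before (sketch): the counter lived inside the per-CPU PDA structure
 * and was reached from assembly as %gs:pda_irqcount.
 */
struct x8664_pda {
	/* ... other per-CPU fields ... */
	unsigned irqcount;		/* assumed semantics: -1 when not on the IRQ stack */
};

/*
 * After (sketch): a standalone per-cpu variable, reached from assembly
 * via PER_CPU_VAR(irq_count).
 */
DEFINE_PER_CPU(unsigned int, irq_count) = -1;

On x86-64 the per-cpu base lives in the GS segment, so both forms compile down to a single %gs-relative increment or decrement; the per-cpu form simply drops the dependency on the PDA layout, which is what this series is removing.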
Diffstat (limited to 'arch/x86/kernel/entry_64.S')
-rw-r--r--	arch/x86/kernel/entry_64.S	14
1 file changed, 7 insertions, 7 deletions
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 7c27da407da7..c52b60919163 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -337,12 +337,12 @@ ENTRY(save_args)
 	je 1f
 	SWAPGS
 	/*
-	 * irqcount is used to check if a CPU is already on an interrupt stack
+	 * irq_count is used to check if a CPU is already on an interrupt stack
 	 * or not. While this is essentially redundant with preempt_count it is
 	 * a little cheaper to use a separate counter in the PDA (short of
 	 * moving irq_enter into assembly, which would be too much work)
 	 */
-1:	incl %gs:pda_irqcount
+1:	incl PER_CPU_VAR(irq_count)
 	jne 2f
 	popq_cfi %rax			/* move return address... */
 	mov PER_CPU_VAR(irq_stack_ptr),%rsp
@@ -837,7 +837,7 @@ common_interrupt:
 ret_from_intr:
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
-	decl %gs:pda_irqcount
+	decl PER_CPU_VAR(irq_count)
 	leaveq
 	CFI_DEF_CFA_REGISTER	rsp
 	CFI_ADJUST_CFA_OFFSET	-8
@@ -1260,14 +1260,14 @@ ENTRY(call_softirq)
 	CFI_REL_OFFSET rbp,0
 	mov  %rsp,%rbp
 	CFI_DEF_CFA_REGISTER rbp
-	incl %gs:pda_irqcount
+	incl PER_CPU_VAR(irq_count)
 	cmove PER_CPU_VAR(irq_stack_ptr),%rsp
 	push  %rbp			# backlink for old unwinder
 	call __do_softirq
 	leaveq
 	CFI_DEF_CFA_REGISTER	rsp
 	CFI_ADJUST_CFA_OFFSET	-8
-	decl %gs:pda_irqcount
+	decl PER_CPU_VAR(irq_count)
 	ret
 	CFI_ENDPROC
 END(call_softirq)
@@ -1297,7 +1297,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
 	movq %rdi, %rsp            # we don't return, adjust the stack frame
 	CFI_ENDPROC
 	DEFAULT_FRAME
-11:	incl %gs:pda_irqcount
+11:	incl PER_CPU_VAR(irq_count)
 	movq %rsp,%rbp
 	CFI_DEF_CFA_REGISTER rbp
 	cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
@@ -1305,7 +1305,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
 	call xen_evtchn_do_upcall
 	popq %rsp
 	CFI_DEF_CFA_REGISTER rsp
-	decl %gs:pda_irqcount
+	decl PER_CPU_VAR(irq_count)
 	jmp  error_exit
 	CFI_ENDPROC
 END(do_hypervisor_callback)
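For readers following the %gs:pda_irqcount to PER_CPU_VAR(irq_count) substitution above: PER_CPU_VAR() is the assembler-side per-cpu accessor from arch/x86/include/asm/percpu.h. The sketch below is a simplified recollection of how it expanded on kernels of this vintage; the exact symbol prefix and config guards should be treated as approximate rather than quoted from this patch.

/* Approximate sketch of the assembler-side per-cpu accessor. */
#ifdef CONFIG_SMP
/*
 * %__percpu_seg is %gs on x86-64 (%fs on 32-bit); the segment base points
 * at the current CPU's per-cpu area, so the access stays CPU-local without
 * any locking or explicit base register.
 */
#define PER_CPU_VAR(var)	%__percpu_seg:per_cpu__##var
#else
/* UP build: there is only one per-cpu area, so a plain symbol reference suffices. */
#define PER_CPU_VAR(var)	per_cpu__##var
#endif

This is why the converted instructions (e.g. "incl PER_CPU_VAR(irq_count)") still assemble to a single %gs-relative memory operation, just like the old PDA access, while no longer depending on the PDA structure's field offsets.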