author		Ingo Molnar <mingo@elte.hu>	2009-01-18 12:15:49 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-01-18 12:15:49 -0500
commit		af37501c792107c2bde1524bdae38d9a247b841a (patch)
tree		b50ee90d29e72956b8b7d8d19677fe5996755d49 /arch/x86/kernel/entry_64.S
parent		d859e29fe34cb833071b20aef860ee94fbad9bb2 (diff)
parent		99937d6455cea95405ac681c86a857d0fcd530bd (diff)
Merge branch 'core/percpu' into perfcounters/core
Conflicts:
	arch/x86/include/asm/pda.h

We merge tip/core/percpu into tip/perfcounters/core because of a
semantic and contextual conflict: the former eliminates the PDA,
while the latter extends it with the apic_perf_irqs field.

Resolve the conflict by moving the new field to the irq_cpustat
structure on 64-bit too.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
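
To make the resolution concrete, the sketch below shows where the new counter
ends up: rather than growing the (now removed) PDA, apic_perf_irqs is counted
in the per-CPU irq_cpustat structure on 64-bit as well. This is illustrative
only; apart from apic_perf_irqs itself, the surrounding field names and header
layout are assumptions, not a verbatim copy of the tree.

/*
 * Illustrative sketch of the conflict resolution: apic_perf_irqs lives
 * in the per-CPU irq_cpustat structure instead of a PDA member.
 * Surrounding fields are shown for context only and may differ from
 * the actual header.
 */
typedef struct {
	unsigned int __softirq_pending;
	unsigned int __nmi_count;	/* arch dependent */
	unsigned int apic_timer_irqs;	/* arch dependent */
	unsigned int apic_perf_irqs;	/* new: performance counter interrupts */
	unsigned int irq0_irqs;
} ____cacheline_aligned irq_cpustat_t;

DECLARE_PER_CPU(irq_cpustat_t, irq_stat);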
Diffstat (limited to 'arch/x86/kernel/entry_64.S')
-rw-r--r--	arch/x86/kernel/entry_64.S	41
1 files changed, 21 insertions, 20 deletions
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 1954a9662203..c092e7d2686d 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -52,6 +52,7 @@
 #include <asm/irqflags.h>
 #include <asm/paravirt.h>
 #include <asm/ftrace.h>
+#include <asm/percpu.h>
 
 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
 #include <linux/elf-em.h>
@@ -209,7 +210,7 @@ ENTRY(native_usergs_sysret64)
 
 	/* %rsp:at FRAMEEND */
 	.macro FIXUP_TOP_OF_STACK tmp offset=0
-	movq %gs:pda_oldrsp,\tmp
+	movq PER_CPU_VAR(old_rsp),\tmp
 	movq \tmp,RSP+\offset(%rsp)
 	movq $__USER_DS,SS+\offset(%rsp)
 	movq $__USER_CS,CS+\offset(%rsp)
@@ -220,7 +221,7 @@ ENTRY(native_usergs_sysret64)
 
 	.macro RESTORE_TOP_OF_STACK tmp offset=0
 	movq RSP+\offset(%rsp),\tmp
-	movq \tmp,%gs:pda_oldrsp
+	movq \tmp,PER_CPU_VAR(old_rsp)
 	movq EFLAGS+\offset(%rsp),\tmp
 	movq \tmp,R11+\offset(%rsp)
 	.endm
@@ -336,15 +337,15 @@ ENTRY(save_args)
 	je 1f
 	SWAPGS
 	/*
-	 * irqcount is used to check if a CPU is already on an interrupt stack
+	 * irq_count is used to check if a CPU is already on an interrupt stack
 	 * or not. While this is essentially redundant with preempt_count it is
 	 * a little cheaper to use a separate counter in the PDA (short of
 	 * moving irq_enter into assembly, which would be too much work)
 	 */
-1:	incl %gs:pda_irqcount
+1:	incl PER_CPU_VAR(irq_count)
 	jne 2f
 	popq_cfi %rax		/* move return address... */
-	mov %gs:pda_irqstackptr,%rsp
+	mov PER_CPU_VAR(irq_stack_ptr),%rsp
 	EMPTY_FRAME 0
 	pushq_cfi %rax		/* ... to the new stack */
 	/*
@@ -467,7 +468,7 @@ END(ret_from_fork)
 ENTRY(system_call)
 	CFI_STARTPROC simple
 	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA rsp,PDA_STACKOFFSET
+	CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
 	CFI_REGISTER rip,rcx
 	/*CFI_REGISTER rflags,r11*/
 	SWAPGS_UNSAFE_STACK
@@ -478,8 +479,8 @@ ENTRY(system_call)
  */
 ENTRY(system_call_after_swapgs)
 
-	movq %rsp,%gs:pda_oldrsp
-	movq %gs:pda_kernelstack,%rsp
+	movq %rsp,PER_CPU_VAR(old_rsp)
+	movq PER_CPU_VAR(kernel_stack),%rsp
 	/*
 	 * No need to follow this irqs off/on section - it's straight
 	 * and short:
@@ -522,7 +523,7 @@ sysret_check:
 	CFI_REGISTER rip,rcx
 	RESTORE_ARGS 0,-ARG_SKIP,1
 	/*CFI_REGISTER rflags,r11*/
-	movq %gs:pda_oldrsp, %rsp
+	movq PER_CPU_VAR(old_rsp), %rsp
 	USERGS_SYSRET64
 
 	CFI_RESTORE_STATE
@@ -832,11 +833,11 @@ common_interrupt:
 	XCPT_FRAME
 	addq $-0x80,(%rsp)	/* Adjust vector to [-256,-1] range */
 	interrupt do_IRQ
-	/* 0(%rsp): oldrsp-ARGOFFSET */
+	/* 0(%rsp): old_rsp-ARGOFFSET */
 ret_from_intr:
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
-	decl %gs:pda_irqcount
+	decl PER_CPU_VAR(irq_count)
 	leaveq
 	CFI_DEF_CFA_REGISTER rsp
 	CFI_ADJUST_CFA_OFFSET -8
@@ -1077,10 +1078,10 @@ ENTRY(\sym)
 	TRACE_IRQS_OFF
 	movq %rsp,%rdi		/* pt_regs pointer */
 	xorl %esi,%esi		/* no error code */
-	movq %gs:pda_data_offset, %rbp
-	subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
+	PER_CPU(init_tss, %rbp)
+	subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
 	call \do_sym
-	addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
+	addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
 	jmp paranoid_exit	/* %ebx: no swapgs flag */
 	CFI_ENDPROC
 END(\sym)
@@ -1264,14 +1265,14 @@ ENTRY(call_softirq)
 	CFI_REL_OFFSET rbp,0
 	mov %rsp,%rbp
 	CFI_DEF_CFA_REGISTER rbp
-	incl %gs:pda_irqcount
-	cmove %gs:pda_irqstackptr,%rsp
+	incl PER_CPU_VAR(irq_count)
+	cmove PER_CPU_VAR(irq_stack_ptr),%rsp
 	push %rbp		# backlink for old unwinder
 	call __do_softirq
 	leaveq
 	CFI_DEF_CFA_REGISTER rsp
 	CFI_ADJUST_CFA_OFFSET -8
-	decl %gs:pda_irqcount
+	decl PER_CPU_VAR(irq_count)
 	ret
 	CFI_ENDPROC
 END(call_softirq)
@@ -1301,15 +1302,15 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
 	movq %rdi, %rsp		# we don't return, adjust the stack frame
 	CFI_ENDPROC
 	DEFAULT_FRAME
-11:	incl %gs:pda_irqcount
+11:	incl PER_CPU_VAR(irq_count)
 	movq %rsp,%rbp
 	CFI_DEF_CFA_REGISTER rbp
-	cmovzq %gs:pda_irqstackptr,%rsp
+	cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
 	pushq %rbp		# backlink for old unwinder
 	call xen_evtchn_do_upcall
 	popq %rsp
 	CFI_DEF_CFA_REGISTER rsp
-	decl %gs:pda_irqcount
+	decl PER_CPU_VAR(irq_count)
 	jmp error_exit
 	CFI_ENDPROC
 END(do_hypervisor_callback)
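
The diff above is largely mechanical: every explicit %gs:pda_* reference
becomes a PER_CPU_VAR() or PER_CPU() access provided by the newly included
<asm/percpu.h>. As a simplified sketch of the SMP x86-64 case only (not the
verbatim header; the real definitions also handle the UP and 32-bit variants,
and naming details may differ slightly), these assembler helpers expand
roughly as follows:

/* Simplified sketch of the <asm/percpu.h> assembler helpers (SMP, x86-64). */
#ifdef CONFIG_X86_64
#define __percpu_seg		gs	/* per-CPU area is reached via %gs */
#define __percpu_mov_op		movq
#else
#define __percpu_seg		fs
#define __percpu_mov_op		movl
#endif

/*
 * PER_CPU_VAR(old_rsp) -> %gs:per_cpu__old_rsp: a direct segment-relative
 * operand, taking the place of the old %gs:pda_oldrsp form.
 */
#define PER_CPU_VAR(var)	%__percpu_seg:per_cpu__##var

/*
 * PER_CPU(var, reg): load this CPU's per-CPU offset, then form the address
 * of the per-CPU variable in reg, as used by PER_CPU(init_tss, %rbp).
 */
#define PER_CPU(var, reg)						\
	__percpu_mov_op %__percpu_seg:per_cpu__this_cpu_off, reg;	\
	lea per_cpu__##var(reg), reg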