author     Linus Torvalds <torvalds@linux-foundation.org>  2015-04-13 16:16:36 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-04-13 16:16:36 -0400
commit     60f898eeaaa1c5d0162a4240bacf33a6c87ecef6 (patch)
tree       23eeac4b1e9a616779d22c104dbc8bd45dfeefd1 /arch/x86/xen
parent     977e1ba50893c15121557b39de586901fe3f75cf (diff)
parent     3b75232d55680ca166dffa274d0587d5faf0a016 (diff)
Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 asm changes from Ingo Molnar:
"There were lots of changes in this development cycle:
- over 100 separate cleanups, restructuring changes, speedups and
fixes in the x86 system call, irq, trap and other entry code, part
of a heroic effort to deobfuscate decade-old spaghetti asm code
and its C code dependencies (Denys Vlasenko, Andy Lutomirski)
- alternatives code fixes and enhancements (Borislav Petkov)
- simplifications and cleanups to the compat code (Brian Gerst)
- signal handling fixes and new x86 testcases (Andy Lutomirski)
- various other fixes and cleanups
By their nature many of these changes are risky - we tried to test
them well on many different x86 systems (there are no known
regressions), and they are split up finely to help bisection - but
there's still a fair bit of residual risk left so caveat emptor"
* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (148 commits)
perf/x86/64: Report regs_user->ax too in get_regs_user()
perf/x86/64: Simplify regs_user->abi setting code in get_regs_user()
perf/x86/64: Do report user_regs->cx while we are in syscall, in get_regs_user()
perf/x86/64: Do not guess user_regs->cs, ss, sp in get_regs_user()
x86/asm/entry/32: Tidy up JNZ instructions after TESTs
x86/asm/entry/64: Reduce padding in execve stubs
x86/asm/entry/64: Remove GET_THREAD_INFO() in ret_from_fork
x86/asm/entry/64: Simplify jumps in ret_from_fork
x86/asm/entry/64: Remove a redundant jump
x86/asm/entry/64: Optimize [v]fork/clone stubs
x86/asm/entry: Zero EXTRA_REGS for stub32_execve() too
x86/asm/entry/64: Move stub_x32_execve 'closer' to stub_execveat()
x86/asm/entry/64: Use common code for rt_sigreturn() epilogue
x86/asm/entry/64: Add forgotten CFI annotation
x86/asm/entry/irq: Simplify interrupt dispatch table (IDT) layout
x86/asm/entry/64: Move opportunistic sysret code to syscall code path
x86, selftests: Add sigreturn selftest
x86/alternatives: Guard NOPs optimization
x86/asm/entry: Clear EXTRA_REGS for all executable formats
x86/signal: Remove pax argument from restore_sigcontext
...
Diffstat (limited to 'arch/x86/xen')

 arch/x86/xen/enlighten.c  |  1 +
 arch/x86/xen/smp.c        | 14 +-------------
 arch/x86/xen/xen-asm_64.S |  8 ++++----
 3 files changed, 6 insertions(+), 17 deletions(-)
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 5240f563076d..81665c9f2132 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -912,6 +912,7 @@ static void xen_load_sp0(struct tss_struct *tss,
         mcs = xen_mc_entry(0);
         MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0);
         xen_mc_issue(PARAVIRT_LAZY_CPU);
+        tss->x86_tss.sp0 = thread->sp0;
 }

 static void xen_set_iopl_mask(unsigned mask)
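A note on the enlighten.c hunk: under Xen the hardware TSS is not what actually switches stacks (the MULTI_stack_switch hypercall tells the hypervisor about the new kernel stack), but this cycle's entry-code rework has the kernel read the top of the kernel stack out of the cached tss->x86_tss.sp0 field, so xen_load_sp0() must now keep that cache in sync. A minimal sketch of the kind of reader this protects, modeled loosely on the current_top_of_stack() helper of this era (a hedged reconstruction, not verbatim kernel code):

/*
 * Sketch: a consumer of the cached sp0. On 64-bit, sp0 doubles as the
 * top of the current task's kernel stack, read straight out of the
 * per-CPU TSS image. If xen_load_sp0() only issued the hypercall and
 * never updated tss->x86_tss.sp0, this would return a stale pointer.
 */
static inline unsigned long current_top_of_stack(void)
{
        return this_cpu_read_stable(cpu_tss.x86_tss.sp0);
}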
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 08e8489c47f1..7413ee3706d0 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -445,15 +445,7 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
 {
         int rc;

-        per_cpu(current_task, cpu) = idle;
-#ifdef CONFIG_X86_32
-        irq_ctx_init(cpu);
-#else
-        clear_tsk_thread_flag(idle, TIF_FORK);
-#endif
-        per_cpu(kernel_stack, cpu) =
-                (unsigned long)task_stack_page(idle) -
-                KERNEL_STACK_OFFSET + THREAD_SIZE;
+        common_cpu_up(cpu, idle);

         xen_setup_runstate_info(cpu);
         xen_setup_timer(cpu);
@@ -468,10 +460,6 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
         if (rc)
                 return rc;

-        if (num_online_cpus() == 1)
-                /* Just in case we booted with a single CPU. */
-                alternatives_enable_smp();
-
         rc = xen_smp_intr_init(cpu);
         if (rc)
                 return rc;
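The smp.c hunks drop Xen's open-coded copy of the secondary-CPU bring-up prologue in favor of the common_cpu_up() helper factored out of the native boot path this cycle; the alternatives_enable_smp() call removed in the second hunk is likewise subsumed by the shared helper rather than lost. A sketch of roughly what common_cpu_up() consolidates, reconstructed from the lines deleted above (the authoritative body lives in arch/x86/kernel/smpboot.c):

/* Hedged reconstruction from the duplicated code removed above. */
void common_cpu_up(unsigned int cpu, struct task_struct *idle)
{
        /* Just in case we booted with a single CPU. */
        alternatives_enable_smp();

        /* Publish the idle task and set up the CPU's stacks before
         * the secondary starts running kernel code. */
        per_cpu(current_task, cpu) = idle;
#ifdef CONFIG_X86_32
        irq_ctx_init(cpu);
#else
        clear_tsk_thread_flag(idle, TIF_FORK);
#endif
        per_cpu(kernel_stack, cpu) =
                (unsigned long)task_stack_page(idle) -
                KERNEL_STACK_OFFSET + THREAD_SIZE;
}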
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index 53adefda4275..985fc3ee0973 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -68,11 +68,11 @@ ENTRY(xen_sysret64)
          * We're already on the usermode stack at this point, but
          * still with the kernel gs, so we can easily switch back
          */
-        movq %rsp, PER_CPU_VAR(old_rsp)
+        movq %rsp, PER_CPU_VAR(rsp_scratch)
         movq PER_CPU_VAR(kernel_stack), %rsp

         pushq $__USER_DS
-        pushq PER_CPU_VAR(old_rsp)
+        pushq PER_CPU_VAR(rsp_scratch)
         pushq %r11
         pushq $__USER_CS
         pushq %rcx
@@ -87,11 +87,11 @@ ENTRY(xen_sysret32)
          * We're already on the usermode stack at this point, but
          * still with the kernel gs, so we can easily switch back
          */
-        movq %rsp, PER_CPU_VAR(old_rsp)
+        movq %rsp, PER_CPU_VAR(rsp_scratch)
         movq PER_CPU_VAR(kernel_stack), %rsp

         pushq $__USER32_DS
-        pushq PER_CPU_VAR(old_rsp)
+        pushq PER_CPU_VAR(rsp_scratch)
         pushq %r11
         pushq $__USER32_CS
         pushq %rcx
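The assembly hunks are a pure rename: the per-CPU slot the 64-bit entry code uses to stash the user stack pointer was renamed from old_rsp to rsp_scratch this cycle, and the Xen sysret paths simply follow suit with no behavioral change. On the C side the slot is an ordinary per-CPU variable; a sketch of its declaration, assuming it lives in arch/x86/kernel/process_64.c as in kernels of this era:

#include <linux/percpu.h>

/*
 * Scratch slot for the user RSP across the syscall fast path
 * (formerly old_rsp); the assembly above refers to it as
 * PER_CPU_VAR(rsp_scratch).
 */
__visible DEFINE_PER_CPU(unsigned long, rsp_scratch);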