author     Linus Torvalds <torvalds@linux-foundation.org>	2014-10-13 20:20:50 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>	2014-10-13 20:20:50 -0400
commit     c7b228adcafe5024a60fc246476e11af8699b759 (patch)
tree       24282d63edec0393b7a5202a891f670bb826952e
parent     708d0b41a26907ac83cde41dd5a75b5a2f8f1218 (diff)
parent     6f46b3aef0031c08a7b439d63013dad2aeb093b2 (diff)
Merge branch 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 FPU updates from Ingo Molnar:
"x86 FPU handling fixes, cleanups and enhancements from Oleg.
The signal handling race fix and the __restore_xstate_sig() preemption
fix for eager-mode is marked for -stable as well"
* 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86: copy_thread: Don't nullify ->ptrace_bps twice
x86, fpu: Shift "fpu_counter = 0" from copy_thread() to arch_dup_task_struct()
x86, fpu: copy_process: Sanitize fpu->last_cpu initialization
x86, fpu: copy_process: Avoid fpu_alloc/copy if !used_math()
x86, fpu: Change __thread_fpu_begin() to use use_eager_fpu()
x86, fpu: __restore_xstate_sig()->math_state_restore() needs preempt_disable()
x86, fpu: shift drop_init_fpu() from save_xstate_sig() to handle_signal()
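
As a rough illustration of the __restore_xstate_sig() preemption fix called out above, the sketch below condenses the eager-FPU branch into a standalone helper. The helper name is hypothetical, and the lazy-FPU path, copy-from-user handling, and error returns are omitted; only use_eager_fpu(), math_state_restore(), set_used_math() and the preempt_*() calls come from the patch itself. math_state_restore() reloads the task's saved FPU state into the current CPU's registers, so preemption has to stay off for the duration of the restore.

	/* Hypothetical condensed helper; mirrors the xsave.c hunk below. */
	static void restore_eager_fpu_after_sigreturn(void)
	{
		set_used_math();		/* the task now has valid FPU state */

		if (use_eager_fpu()) {
			preempt_disable();	/* keep the restore on one CPU */
			math_state_restore();	/* load the saved xstate into registers */
			preempt_enable();
		}
	}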
-rw-r--r--  arch/x86/include/asm/fpu-internal.h |  2
-rw-r--r--  arch/x86/kernel/process.c           | 16
-rw-r--r--  arch/x86/kernel/process_32.c        |  6
-rw-r--r--  arch/x86/kernel/process_64.c        |  3
-rw-r--r--  arch/x86/kernel/signal.c            |  5
-rw-r--r--  arch/x86/kernel/xsave.c             |  7
6 files changed, 20 insertions, 19 deletions
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index 412ececa00b9..e97622f57722 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -344,7 +344,7 @@ static inline void __thread_fpu_end(struct task_struct *tsk)
 
 static inline void __thread_fpu_begin(struct task_struct *tsk)
 {
-	if (!static_cpu_has_safe(X86_FEATURE_EAGER_FPU))
+	if (!use_eager_fpu())
 		clts();
 	__thread_set_has_fpu(tsk);
 }
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index f804dc935d2a..e127ddaa2d5a 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -64,14 +64,16 @@ EXPORT_SYMBOL_GPL(task_xstate_cachep);
  */
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
-	int ret;
-
 	*dst = *src;
-	if (fpu_allocated(&src->thread.fpu)) {
-		memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu));
-		ret = fpu_alloc(&dst->thread.fpu);
-		if (ret)
-			return ret;
+
+	dst->thread.fpu_counter = 0;
+	dst->thread.fpu.has_fpu = 0;
+	dst->thread.fpu.last_cpu = ~0;
+	dst->thread.fpu.state = NULL;
+	if (tsk_used_math(src)) {
+		int err = fpu_alloc(&dst->thread.fpu);
+		if (err)
+			return err;
 		fpu_copy(dst, src);
 	}
 	return 0;
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 7bc86bbe7485..8f3ebfe710d0 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -138,6 +138,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 
 	p->thread.sp = (unsigned long) childregs;
 	p->thread.sp0 = (unsigned long) (childregs+1);
+	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
 
 	if (unlikely(p->flags & PF_KTHREAD)) {
 		/* kernel thread */
@@ -152,9 +153,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 		childregs->orig_ax = -1;
 		childregs->cs = __KERNEL_CS | get_kernel_rpl();
 		childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED;
-		p->thread.fpu_counter = 0;
 		p->thread.io_bitmap_ptr = NULL;
-		memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
 		return 0;
 	}
 	*childregs = *current_pt_regs();
@@ -165,13 +164,10 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 	p->thread.ip = (unsigned long) ret_from_fork;
 	task_user_gs(p) = get_user_gs(current_pt_regs());
 
-	p->thread.fpu_counter = 0;
 	p->thread.io_bitmap_ptr = NULL;
 	tsk = current;
 	err = -ENOMEM;
 
-	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
-
 	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
 		p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
 						  IO_BITMAP_BYTES, GFP_KERNEL);
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index ca5b02d405c3..3ed4a68d4013 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -163,7 +163,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 	p->thread.sp = (unsigned long) childregs;
 	p->thread.usersp = me->thread.usersp;
 	set_tsk_thread_flag(p, TIF_FORK);
-	p->thread.fpu_counter = 0;
 	p->thread.io_bitmap_ptr = NULL;
 
 	savesegment(gs, p->thread.gsindex);
@@ -193,8 +192,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 	childregs->sp = sp;
 
 	err = -ENOMEM;
-	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
-
 	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
 		p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
 						  IO_BITMAP_BYTES, GFP_KERNEL);
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 2851d63c1202..ed37a768d0fc 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -675,6 +675,11 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 		 * handler too.
 		 */
 		regs->flags &= ~(X86_EFLAGS_DF|X86_EFLAGS_RF|X86_EFLAGS_TF);
+		/*
+		 * Ensure the signal handler starts with the new fpu state.
+		 */
+		if (used_math())
+			drop_init_fpu(current);
 	}
 	signal_setup_done(failed, ksig, test_thread_flag(TIF_SINGLESTEP));
 }
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 940b142cc11f..4c540c4719d8 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -271,8 +271,6 @@ int save_xstate_sig(void __user *buf, void __user *buf_fx, int size)
 	if (use_fxsr() && save_xstate_epilog(buf_fx, ia32_fxstate))
 		return -1;
 
-	drop_init_fpu(tsk);	/* trigger finit */
-
 	return 0;
 }
 
@@ -402,8 +400,11 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
 			set_used_math();
 		}
 
-		if (use_eager_fpu())
+		if (use_eager_fpu()) {
+			preempt_disable();
 			math_state_restore();
+			preempt_enable();
+		}
 
 		return err;
 	} else {