Diffstat (limited to 'arch/x86/kernel/process.c')
-rw-r--r--  arch/x86/kernel/process.c  50
1 file changed, 26 insertions(+), 24 deletions(-)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 28ad9f4d8b94..e7e35219b32f 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -20,7 +20,6 @@
 #include <asm/idle.h>
 #include <asm/uaccess.h>
 #include <asm/i387.h>
-#include <asm/ds.h>
 #include <asm/debugreg.h>
 
 unsigned long idle_halt;
@@ -32,26 +31,22 @@ struct kmem_cache *task_xstate_cachep;
 
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
+	int ret;
+
 	*dst = *src;
-	if (src->thread.xstate) {
-		dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
-						      GFP_KERNEL);
-		if (!dst->thread.xstate)
-			return -ENOMEM;
-		WARN_ON((unsigned long)dst->thread.xstate & 15);
-		memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
+	if (fpu_allocated(&src->thread.fpu)) {
+		memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu));
+		ret = fpu_alloc(&dst->thread.fpu);
+		if (ret)
+			return ret;
+		fpu_copy(&dst->thread.fpu, &src->thread.fpu);
 	}
 	return 0;
 }
 
 void free_thread_xstate(struct task_struct *tsk)
 {
-	if (tsk->thread.xstate) {
-		kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
-		tsk->thread.xstate = NULL;
-	}
-
-	WARN(tsk->thread.ds_ctx, "leaking DS context\n");
+	fpu_free(&tsk->thread.fpu);
 }
 
 void free_thread_info(struct thread_info *ti)
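
The hunk above replaces the open-coded xstate handling with a small fpu_*() API. Those helpers are defined outside this file and are not part of this diff; the following is a minimal sketch of what they plausibly look like, assuming struct fpu wraps a single state pointer backed by task_xstate_cachep, mirroring the open-coded logic being removed:

/*
 * Sketch only: assumed helper bodies, not taken from this diff.
 * struct fpu is assumed to be { union thread_xstate *state; }.
 */
static inline int fpu_allocated(struct fpu *fpu)
{
	return fpu->state != NULL;
}

static inline int fpu_alloc(struct fpu *fpu)
{
	if (fpu_allocated(fpu))
		return 0;
	fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
	if (!fpu->state)
		return -ENOMEM;
	/* xsave/fxsave areas need 16-byte alignment, as the old WARN_ON checked */
	WARN_ON((unsigned long)fpu->state & 15);
	return 0;
}

static inline void fpu_free(struct fpu *fpu)
{
	if (fpu->state) {
		kmem_cache_free(task_xstate_cachep, fpu->state);
		fpu->state = NULL;
	}
}

static inline void fpu_copy(struct fpu *dst, struct fpu *src)
{
	memcpy(dst->state, src->state, xstate_size);
}

Note that fpu_free() must tolerate an unallocated fpu: free_thread_xstate() now calls it unconditionally, where the old code guarded the kmem_cache_free() with its own NULL check.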
@@ -198,11 +193,16 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 	prev = &prev_p->thread;
 	next = &next_p->thread;
 
-	if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) ||
-	    test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR))
-		ds_switch_to(prev_p, next_p);
-	else if (next->debugctlmsr != prev->debugctlmsr)
-		update_debugctlmsr(next->debugctlmsr);
+	if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
+	    test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
+		unsigned long debugctl = get_debugctlmsr();
+
+		debugctl &= ~DEBUGCTLMSR_BTF;
+		if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
+			debugctl |= DEBUGCTLMSR_BTF;
+
+		update_debugctlmsr(debugctl);
+	}
 
 	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
 	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
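
The XOR of the two TIF_BLOCKSTEP tests means the DEBUGCTL MSR is touched only when the block-step setting actually differs between the outgoing and incoming task; when both tasks agree, the MSR read/write is skipped entirely. get_debugctlmsr() and update_debugctlmsr() are thin wrappers around rdmsrl()/wrmsrl() on MSR_IA32_DEBUGCTLMSR; a sketch, omitting the guards the real accessors carry for CPUs without that MSR:

/* Sketch of the assumed accessors; BTF is bit 1 of IA32_DEBUGCTL. */
#define DEBUGCTLMSR_BTF		(1UL << 1)	/* raise #DB on branches */

static inline unsigned long get_debugctlmsr(void)
{
	unsigned long debugctlmsr = 0;

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
	return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}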
@@ -546,11 +546,13 @@ static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
 	 * check OSVW bit for CPUs that are not affected
 	 * by erratum #400
 	 */
-	rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, val);
-	if (val >= 2) {
-		rdmsrl(MSR_AMD64_OSVW_STATUS, val);
-		if (!(val & BIT(1)))
-			goto no_c1e_idle;
+	if (cpu_has(c, X86_FEATURE_OSVW)) {
+		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, val);
+		if (val >= 2) {
+			rdmsrl(MSR_AMD64_OSVW_STATUS, val);
+			if (!(val & BIT(1)))
+				goto no_c1e_idle;
+		}
 	}
 	return 1;
 }
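
The new code consults AMD's OS Visible Workaround registers only when the CPU actually advertises the feature; the old code issued the rdmsrl() unconditionally, which is unsafe on CPUs lacking the OSVW MSRs. Restated as a standalone predicate (amd_e400_affected() is a hypothetical name, not a kernel function), the logic reads:

/*
 * Hypothetical helper restating the OSVW probe above: returns true
 * when erratum #400 must be assumed present on this CPU.
 */
static bool amd_e400_affected(const struct cpuinfo_x86 *c)
{
	u64 val;

	/* No OSVW support: cannot prove we are unaffected, assume affected. */
	if (!cpu_has(c, X86_FEATURE_OSVW))
		return true;

	/* OSVW_ID_LENGTH gives the number of valid status bits; we need bit 1. */
	rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, val);
	if (val < 2)
		return true;

	/* Status bit 1 set means erratum #400 applies to this CPU. */
	rdmsrl(MSR_AMD64_OSVW_STATUS, val);
	return val & BIT(1);
}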