author		Suresh Siddha <suresh.b.siddha@intel.com>	2010-07-19 19:05:49 -0400
committer	H. Peter Anvin <hpa@linux.intel.com>		2010-07-19 20:51:30 -0400
commit		29104e101d710dd152f807978884643a52eca8b7
tree		03ab625528d91ad13d60a817c7885591fea197b3
parent		a1488f8bf4d72ad724700f6e982469a1240e4264
x86, xsave: Sync xsave memory layout with its header for user handling
With xsaveopt, if a processor implementation discerns that a processor state
component is in its initialized state, it may clear the corresponding bit in
xsave_hdr.xstate_bv to '0' without modifying the corresponding memory layout.
Hence, while presenting the xstate information to the user, we always ensure
that the memory layout of a feature is in the init state if the corresponding
header bit is zero. This ensures consistency and avoids the user seeing stale
state in the memory layout during signal handling, debugging, etc.
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
LKML-Reference: <20100719230205.351459480@sbs-t61.sc.intel.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
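
The xsave_hdr.xstate_bv field named above sits in the xsave header, which
follows the legacy FXSAVE image inside the per-task xsave area. A rough sketch
of that layout, with illustrative struct and field names rather than the
kernel's exact definitions (see arch/x86/include/asm/processor.h and xsave.h
for those):

#include <stdint.h>

/* Illustrative layout only; sizes follow the architectural definition. */
struct xsave_header_sketch {
	uint64_t xstate_bv;	/* which state components the image holds;
				 * xsaveopt may leave a bit at 0 when that
				 * component was in its init state */
	uint64_t reserved[7];	/* remainder of the 64-byte header */
};

struct xsave_area_sketch {
	uint8_t i387[512];		/* legacy FXSAVE image, bytes 0..511 */
	struct xsave_header_sketch hdr;	/* xsave header, bytes 512..575 */
	uint8_t extended[];		/* per-feature regions, e.g. YMM state */
};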
-rw-r--r--	arch/x86/include/asm/i387.h	14
-rw-r--r--	arch/x86/include/asm/xsave.h	10
-rw-r--r--	arch/x86/kernel/i387.c		11
-rw-r--r--	arch/x86/kernel/xsave.c		89
4 files changed, 123 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index c991b3a7b904..bb370fd0a1c2 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -58,11 +58,25 @@ extern int restore_i387_xstate_ia32(void __user *buf);
 
 #define X87_FSW_ES	(1 << 7)	/* Exception Summary */
 
+static __always_inline __pure bool use_xsaveopt(void)
+{
+	return 0;
+}
+
 static __always_inline __pure bool use_xsave(void)
 {
 	return static_cpu_has(X86_FEATURE_XSAVE);
 }
 
+extern void __sanitize_i387_state(struct task_struct *);
+
+static inline void sanitize_i387_state(struct task_struct *tsk)
+{
+	if (!use_xsaveopt())
+		return;
+	__sanitize_i387_state(tsk);
+}
+
 #ifdef CONFIG_X86_64
 
 /* Ignore delayed exceptions from user space */
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index 2c4390cae228..0c72adc0cb15 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -111,6 +111,16 @@ static inline void xrstor_state(struct xsave_struct *fx, u64 mask)
 		     :   "memory");
 }
 
+static inline void xsave_state(struct xsave_struct *fx, u64 mask)
+{
+	u32 lmask = mask;
+	u32 hmask = mask >> 32;
+
+	asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x27\n\t"
+		     : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+		     :   "memory");
+}
+
 static inline void fpu_xsave(struct fpu *fpu)
 {
 	/* This, however, we can work around by forcing the compiler to select
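
The hand-assembled .byte sequence in the new xsave_state() encodes the xsave
instruction with its memory operand in (%rdi) (ModRM 0x27 selects opcode
extension /4); REX_PREFIX contributes the REX.W byte on 64-bit builds, making
it xsave64. Raw bytes are used because assemblers of the period did not all
know the mnemonic. As a rough sketch of the same operation written with the
plain mnemonic, assuming a new enough assembler and ignoring the xsave/xsave64
distinction (this helper is illustrative, not part of the patch):

static inline void xsave_state_mnemonic(struct xsave_struct *fx, u64 mask)
{
	u32 lmask = mask;	/* low half of the requested-feature bitmap, in eax */
	u32 hmask = mask >> 32;	/* high half, in edx */

	asm volatile("xsave %0"
		     : "+m" (*fx)
		     : "a" (lmask), "d" (hmask)
		     : "memory");
}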
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 86cef6b32253..6106af9fd129 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -190,6 +190,8 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
 	if (ret)
 		return ret;
 
+	sanitize_i387_state(target);
+
 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 				   &target->thread.fpu.state->fxsave, 0, -1);
 }
@@ -207,6 +209,8 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
 	if (ret)
 		return ret;
 
+	sanitize_i387_state(target);
+
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 				 &target->thread.fpu.state->fxsave, 0, -1);
 
@@ -446,6 +450,8 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
 					   -1);
 	}
 
+	sanitize_i387_state(target);
+
 	if (kbuf && pos == 0 && count == sizeof(env)) {
 		convert_from_fxsr(kbuf, target);
 		return 0;
@@ -467,6 +473,8 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
 	if (ret)
 		return ret;
 
+	sanitize_i387_state(target);
+
 	if (!HAVE_HWFP)
 		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);
 
@@ -533,6 +541,9 @@ static int save_i387_xsave(void __user *buf)
 	struct _fpstate_ia32 __user *fx = buf;
 	int err = 0;
 
+
+	sanitize_i387_state(tsk);
+
 	/*
 	 * For legacy compatible, we always set FP/SSE bits in the bit
 	 * vector while saving the state to the user context.
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 4993caa4181c..368047c8d507 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -24,6 +24,76 @@ struct _fpx_sw_bytes fx_sw_reserved_ia32;
 static unsigned int *xstate_offsets, *xstate_sizes, xstate_features;
 
 /*
+ * If a processor implementation discern that a processor state component is
+ * in its initialized state it may modify the corresponding bit in the
+ * xsave_hdr.xstate_bv as '0', with out modifying the corresponding memory
+ * layout in the case of xsaveopt. While presenting the xstate information to
+ * the user, we always ensure that the memory layout of a feature will be in
+ * the init state if the corresponding header bit is zero. This is to ensure
+ * that the user doesn't see some stale state in the memory layout during
+ * signal handling, debugging etc.
+ */
+void __sanitize_i387_state(struct task_struct *tsk)
+{
+	u64 xstate_bv;
+	int feature_bit = 0x2;
+	struct i387_fxsave_struct *fx = &tsk->thread.fpu.state->fxsave;
+
+	if (!fx)
+		return;
+
+	BUG_ON(task_thread_info(tsk)->status & TS_USEDFPU);
+
+	xstate_bv = tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv;
+
+	/*
+	 * None of the feature bits are in init state. So nothing else
+	 * to do for us, as the memory layout is upto date.
+	 */
+	if ((xstate_bv & pcntxt_mask) == pcntxt_mask)
+		return;
+
+	/*
+	 * FP is in init state
+	 */
+	if (!(xstate_bv & XSTATE_FP)) {
+		fx->cwd = 0x37f;
+		fx->swd = 0;
+		fx->twd = 0;
+		fx->fop = 0;
+		fx->rip = 0;
+		fx->rdp = 0;
+		memset(&fx->st_space[0], 0, 128);
+	}
+
+	/*
+	 * SSE is in init state
+	 */
+	if (!(xstate_bv & XSTATE_SSE))
+		memset(&fx->xmm_space[0], 0, 256);
+
+	xstate_bv = (pcntxt_mask & ~xstate_bv) >> 2;
+
+	/*
+	 * Update all the other memory layouts for which the corresponding
+	 * header bit is in the init state.
+	 */
+	while (xstate_bv) {
+		if (xstate_bv & 0x1) {
+			int offset = xstate_offsets[feature_bit];
+			int size = xstate_sizes[feature_bit];
+
+			memcpy(((void *) fx) + offset,
+			       ((void *) init_xstate_buf) + offset,
+			       size);
+		}
+
+		xstate_bv >>= 1;
+		feature_bit++;
+	}
+}
+
+/*
  * Check for the presence of extended state information in the
  * user fpstate pointer in the sigcontext.
  */
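
In __sanitize_i387_state() above, the walk starts at feature bit 2 because FP
(bit 0) and SSE (bit 1) are handled explicitly; (pcntxt_mask & ~xstate_bv) >> 2
leaves exactly the supported extended features whose header bits are clear. A
small stand-alone illustration of that arithmetic, using made-up mask values
(bits 0/1/2 = FP/SSE/YMM as in the architecture):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pcntxt_mask = 0x7;	/* CPU supports FP | SSE | YMM */
	uint64_t xstate_bv   = 0x3;	/* image holds FP and SSE; the clear
					 * YMM bit means YMM was in init state */
	uint64_t pending = (pcntxt_mask & ~xstate_bv) >> 2;	/* == 0x1 */
	int feature_bit = 2;

	while (pending) {
		if (pending & 0x1)
			printf("feature %d: copy its init image over the save area\n",
			       feature_bit);
		pending >>= 1;
		feature_bit++;
	}
	return 0;
}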
@@ -112,6 +182,7 @@ int save_i387_xstate(void __user *buf)
 		task_thread_info(tsk)->status &= ~TS_USEDFPU;
 		stts();
 	} else {
+		sanitize_i387_state(tsk);
 		if (__copy_to_user(buf, &tsk->thread.fpu.state->fxsave,
 				   xstate_size))
 			return -1;
@@ -333,10 +404,26 @@ static void setup_xstate_features(void)
  */
 static void __init setup_xstate_init(void)
 {
+	setup_xstate_features();
+
+	/*
+	 * Setup init_xstate_buf to represent the init state of
+	 * all the features managed by the xsave
+	 */
 	init_xstate_buf = alloc_bootmem(xstate_size);
 	init_xstate_buf->i387.mxcsr = MXCSR_DEFAULT;
 
-	setup_xstate_features();
+	clts();
+	/*
+	 * Init all the features state with header_bv being 0x0
+	 */
+	xrstor_state(init_xstate_buf, -1);
+	/*
+	 * Dump the init state again. This is to identify the init state
+	 * of any feature which is not represented by all zero's.
+	 */
+	xsave_state(init_xstate_buf, -1);
+	stts();
 }
 
 /*
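
From user space the change is visible only indirectly: a debugger or signal
handler that inspects the saved FP/SSE image now sees init values rather than
stale register contents whenever the corresponding xstate_bv bit was clear. A
minimal, hypothetical x86_64 tracer-side read of that image (ordinary ptrace
usage, nothing specific to this patch):

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>

/* Fetch the tracee's FXSAVE-format FP/SSE state.  With this patch applied,
 * registers that were in their init state in the tracee read back with
 * their init values, even on xsaveopt hardware. */
int dump_fpregs(pid_t pid)
{
	struct user_fpregs_struct fp;

	if (ptrace(PTRACE_GETFPREGS, pid, NULL, &fp) == -1) {
		perror("PTRACE_GETFPREGS");
		return -1;
	}
	printf("cwd=%#x mxcsr=%#x\n", (unsigned)fp.cwd, fp.mxcsr);
	return 0;
}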