 arch/x86/include/asm/i387.h  |  15
 arch/x86/include/asm/xsave.h |  24
 arch/x86/kernel/cpu/common.c |  16
 arch/x86/kernel/i387.c       |  39
 arch/x86/kernel/xsave.c      | 170
 5 files changed, 222 insertions(+), 42 deletions(-)
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index f1accc625beb..a73a8d5a5e69 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -31,7 +31,6 @@ extern void mxcsr_feature_mask_init(void);
 extern int init_fpu(struct task_struct *child);
 extern asmlinkage void math_state_restore(void);
 extern void __math_state_restore(void);
-extern void init_thread_xstate(void);
 extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
 
 extern user_regset_active_fn fpregs_active, xfpregs_active;
@@ -58,11 +57,25 @@ extern int restore_i387_xstate_ia32(void __user *buf);
 
 #define X87_FSW_ES (1 << 7)     /* Exception Summary */
 
+static __always_inline __pure bool use_xsaveopt(void)
+{
+        return static_cpu_has(X86_FEATURE_XSAVEOPT);
+}
+
 static __always_inline __pure bool use_xsave(void)
 {
         return static_cpu_has(X86_FEATURE_XSAVE);
 }
 
+extern void __sanitize_i387_state(struct task_struct *);
+
+static inline void sanitize_i387_state(struct task_struct *tsk)
+{
+        if (!use_xsaveopt())
+                return;
+        __sanitize_i387_state(tsk);
+}
+
 #ifdef CONFIG_X86_64
 
 /* Ignore delayed exceptions from user space */
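A note on the split above: use_xsaveopt() boils down to a boot-time patched branch, so the inline sanitize_i387_state() wrapper costs nothing on CPUs without XSAVEOPT and only the out-of-line __sanitize_i387_state() does real work. Below is a minimal user-space sketch of the underlying feature test, assuming GCC/clang's <cpuid.h>; a plain CPUID query stands in for the kernel's static_cpu_has() alternatives machinery.

/*
 * Illustrative sketch, not kernel code: detect XSAVEOPT the slow way.
 * CPUID.(EAX=0xd, ECX=1):EAX bit 0 reports XSAVEOPT support.
 */
#include <cpuid.h>
#include <stdbool.h>
#include <stdio.h>

static bool cpu_has_xsaveopt(void)
{
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid_count(0xd, 1, &eax, &ebx, &ecx, &edx))
                return false;
        return eax & 0x1;
}

int main(void)
{
        printf("xsaveopt supported: %s\n", cpu_has_xsaveopt() ? "yes" : "no");
        return 0;
}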
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index 06acdbd7570a..c6ce2452f10c 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -3,7 +3,8 @@
 
 #include <linux/types.h>
 #include <asm/processor.h>
-#include <asm/i387.h>
+
+#define XSTATE_CPUID 0x0000000d
 
 #define XSTATE_FP 0x1
 #define XSTATE_SSE 0x2
@@ -32,10 +33,8 @@
 
 extern unsigned int xstate_size;
 extern u64 pcntxt_mask;
-extern struct xsave_struct *init_xstate_buf;
 extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
 
-extern void xsave_cntxt_init(void);
 extern void xsave_init(void);
 extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask);
 extern int init_fpu(struct task_struct *child);
@@ -127,12 +126,25 @@ static inline void xrstor_state(struct xsave_struct *fx, u64 mask)
                      : "memory");
 }
 
+static inline void xsave_state(struct xsave_struct *fx, u64 mask)
+{
+        u32 lmask = mask;
+        u32 hmask = mask >> 32;
+
+        asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x27\n\t"
+                     : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+                     : "memory");
+}
+
 static inline void fpu_xsave(struct fpu *fpu)
 {
         /* This, however, we can work around by forcing the compiler to select
            an addressing mode that doesn't require extended registers. */
-        __asm__ __volatile__(".byte " REX_PREFIX "0x0f,0xae,0x27"
-                             : : "D" (&(fpu->state->xsave)),
-                                 "a" (-1), "d" (-1) : "memory");
+        alternative_input(
+                ".byte " REX_PREFIX "0x0f,0xae,0x27",
+                ".byte " REX_PREFIX "0x0f,0xae,0x37",
+                X86_FEATURE_XSAVEOPT,
+                [fx] "D" (&fpu->state->xsave), "a" (-1), "d" (-1) :
+                "memory");
 }
 #endif
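On the hand-assembled opcodes in this header: 0f ae is a ModRM-group opcode, so the reg field of the ModRM byte selects the operation (/4 is XSAVE, /6 is XSAVEOPT) and rm = 111b addresses the save area through %rdi, which is why the buffer pointer is pinned with the "D" constraint; alternative_input() then rewrites the XSAVE byte pattern into the XSAVEOPT one at boot when X86_FEATURE_XSAVEOPT is set. The sketch below only decodes those two ModRM bytes to make the /4 vs /6 difference visible; the field layout comes from the x86 encoding, the program itself is purely illustrative.

#include <stdio.h>

/* Decode a ModRM byte: mod(2 bits) | reg(3 bits) | rm(3 bits). */
static void decode_modrm(unsigned char modrm)
{
        unsigned int mod = modrm >> 6;          /* addressing mode */
        unsigned int reg = (modrm >> 3) & 0x7;  /* opcode extension (/digit) */
        unsigned int rm  = modrm & 0x7;         /* base register, 7 = rdi */

        printf("modrm 0x%02x: mod=%u reg=/%u rm=%u\n", modrm, mod, reg, rm);
}

int main(void)
{
        decode_modrm(0x27);     /* 0f ae /4 -> xsave    (%rdi) */
        decode_modrm(0x37);     /* 0f ae /6 -> xsaveopt (%rdi) */
        return 0;
}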
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index f10273138382..490dac63c2d2 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -140,10 +140,18 @@ EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
 static int __init x86_xsave_setup(char *s)
 {
         setup_clear_cpu_cap(X86_FEATURE_XSAVE);
+        setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
         return 1;
 }
 __setup("noxsave", x86_xsave_setup);
 
+static int __init x86_xsaveopt_setup(char *s)
+{
+        setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
+        return 1;
+}
+__setup("noxsaveopt", x86_xsaveopt_setup);
+
 #ifdef CONFIG_X86_32
 static int cachesize_override __cpuinitdata = -1;
 static int disable_x86_serial_nr __cpuinitdata = 1;
@@ -1202,6 +1210,7 @@ void __cpuinit cpu_init(void)
         dbg_restore_debug_regs();
 
         fpu_init();
+        xsave_init();
 
         raw_local_save_flags(kernel_eflags);
 
@@ -1262,12 +1271,7 @@ void __cpuinit cpu_init(void)
         clear_used_math();
         mxcsr_feature_mask_init();
 
-        /*
-         * Boot processor to setup the FP and extended state context info.
-         */
-        if (smp_processor_id() == boot_cpu_id)
-                init_thread_xstate();
-
+        fpu_init();
         xsave_init();
 }
 #endif
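The command-line handling above encodes a dependency: XSAVEOPT only refines XSAVE, so "noxsave" has to clear both capability bits while "noxsaveopt" clears just the optimization. A stand-alone sketch of that rule; the bitmask names are made up for illustration and are not the kernel's cpu-capability API.

#include <stdio.h>

#define FEAT_XSAVE      (1u << 0)
#define FEAT_XSAVEOPT   (1u << 1)

static unsigned int cpu_caps = FEAT_XSAVE | FEAT_XSAVEOPT;

static void handle_noxsave(void)
{
        /* mirrors x86_xsave_setup(): disabling xsave drops xsaveopt too */
        cpu_caps &= ~(FEAT_XSAVE | FEAT_XSAVEOPT);
}

static void handle_noxsaveopt(void)
{
        /* mirrors x86_xsaveopt_setup(): only the optimization goes away */
        cpu_caps &= ~FEAT_XSAVEOPT;
}

int main(void)
{
        handle_noxsaveopt();
        printf("after noxsaveopt: caps=0x%x\n", cpu_caps);
        handle_noxsave();
        printf("after noxsave:    caps=0x%x\n", cpu_caps);
        return 0;
}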
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index c4444bce8469..1f11f5ce668f 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -59,18 +59,18 @@ void __cpuinit mxcsr_feature_mask_init(void)
         stts();
 }
 
-void __cpuinit init_thread_xstate(void)
+static void __cpuinit init_thread_xstate(void)
 {
+        /*
+         * Note that xstate_size might be overwritten later during
+         * xsave_init().
+         */
+
         if (!HAVE_HWFP) {
                 xstate_size = sizeof(struct i387_soft_struct);
                 return;
         }
 
-        if (cpu_has_xsave) {
-                xsave_cntxt_init();
-                return;
-        }
-
         if (cpu_has_fxsr)
                 xstate_size = sizeof(struct i387_fxsave_struct);
 #ifdef CONFIG_X86_32
@@ -84,6 +84,7 @@ void __cpuinit init_thread_xstate(void)
  * Called at bootup to set up the initial FPU state that is later cloned
  * into all processes.
  */
+
 void __cpuinit fpu_init(void)
 {
         unsigned long oldcr0 = read_cr0();
@@ -93,19 +94,24 @@ void __cpuinit fpu_init(void)
 
         write_cr0(oldcr0 & ~(X86_CR0_TS|X86_CR0_EM)); /* clear TS and EM */
 
-        /*
-         * Boot processor to setup the FP and extended state context info.
-         */
         if (!smp_processor_id())
                 init_thread_xstate();
-        xsave_init();
 
         mxcsr_feature_mask_init();
         /* clean state in init */
         current_thread_info()->status = 0;
         clear_used_math();
 }
-#endif /* CONFIG_X86_64 */
+
+#else /* CONFIG_X86_64 */
+
+void __cpuinit fpu_init(void)
+{
+        if (!smp_processor_id())
+                init_thread_xstate();
+}
+
+#endif /* CONFIG_X86_32 */
 
 void fpu_finit(struct fpu *fpu)
 {
@@ -191,6 +197,8 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
         if (ret)
                 return ret;
 
+        sanitize_i387_state(target);
+
         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                    &target->thread.fpu.state->fxsave, 0, -1);
 }
@@ -208,6 +216,8 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
         if (ret)
                 return ret;
 
+        sanitize_i387_state(target);
+
         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                  &target->thread.fpu.state->fxsave, 0, -1);
 
@@ -447,6 +457,8 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
                                            -1);
         }
 
+        sanitize_i387_state(target);
+
         if (kbuf && pos == 0 && count == sizeof(env)) {
                 convert_from_fxsr(kbuf, target);
                 return 0;
@@ -468,6 +480,8 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
         if (ret)
                 return ret;
 
+        sanitize_i387_state(target);
+
         if (!HAVE_HWFP)
                 return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);
 
@@ -534,6 +548,9 @@ static int save_i387_xsave(void __user *buf)
         struct _fpstate_ia32 __user *fx = buf;
         int err = 0;
 
+
+        sanitize_i387_state(tsk);
+
         /*
          * For legacy compatible, we always set FP/SSE bits in the bit
          * vector while saving the state to the user context.
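Every sanitize_i387_state() call added above sits on a path that exposes the task's saved FPU image to user space (ptrace regsets and the ia32 signal frame). The invariant being restored: with xsaveopt, a component whose xstate_bv bit is 0 may still have stale bytes in memory, and those bytes must be replaced with init values before anyone reads the buffer. A user-space sketch of that invariant; the two-component layout is invented for illustration and does not match the real xsave area.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct toy_xstate {
        uint8_t  fp[16];        /* component 0 */
        uint8_t  sse[16];       /* component 1 */
        uint64_t xstate_bv;     /* which components hold live data */
};

static const struct toy_xstate init_state;      /* all-zero init values */

static void toy_sanitize(struct toy_xstate *s)
{
        if (!(s->xstate_bv & (1ull << 0)))
                memcpy(s->fp, init_state.fp, sizeof(s->fp));
        if (!(s->xstate_bv & (1ull << 1)))
                memcpy(s->sse, init_state.sse, sizeof(s->sse));
}

int main(void)
{
        struct toy_xstate s;

        memset(&s, 0xff, sizeof(s));    /* stale garbage left by a lazy save */
        s.xstate_bv = 1ull << 1;        /* only the SSE component was written */
        toy_sanitize(&s);
        printf("fp[0]=0x%02x sse[0]=0x%02x\n", s.fp[0], s.sse[0]);
        return 0;
}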
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index a4ae302f03aa..9c253bd65e24 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -16,11 +16,88 @@
  */
 u64 pcntxt_mask;
 
+/*
+ * Represents init state for the supported extended state.
+ */
+static struct xsave_struct *init_xstate_buf;
+
 struct _fpx_sw_bytes fx_sw_reserved;
 #ifdef CONFIG_IA32_EMULATION
 struct _fpx_sw_bytes fx_sw_reserved_ia32;
 #endif
 
+static unsigned int *xstate_offsets, *xstate_sizes, xstate_features;
+
+/*
+ * If a processor implementation discerns that a processor state component is
+ * in its initialized state, it may modify the corresponding bit in the
+ * xsave_hdr.xstate_bv as '0', without modifying the corresponding memory
+ * layout in the case of xsaveopt. While presenting the xstate information to
+ * the user, we always ensure that the memory layout of a feature will be in
+ * the init state if the corresponding header bit is zero. This is to ensure
+ * that the user doesn't see some stale state in the memory layout during
+ * signal handling, debugging etc.
+ */
+void __sanitize_i387_state(struct task_struct *tsk)
+{
+        u64 xstate_bv;
+        int feature_bit = 0x2;
+        struct i387_fxsave_struct *fx = &tsk->thread.fpu.state->fxsave;
+
+        if (!fx)
+                return;
+
+        BUG_ON(task_thread_info(tsk)->status & TS_USEDFPU);
+
+        xstate_bv = tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv;
+
+        /*
+         * None of the feature bits are in init state. So nothing else
+         * to do for us, as the memory layout is up to date.
+         */
+        if ((xstate_bv & pcntxt_mask) == pcntxt_mask)
+                return;
+
+        /*
+         * FP is in init state
+         */
+        if (!(xstate_bv & XSTATE_FP)) {
+                fx->cwd = 0x37f;
+                fx->swd = 0;
+                fx->twd = 0;
+                fx->fop = 0;
+                fx->rip = 0;
+                fx->rdp = 0;
+                memset(&fx->st_space[0], 0, 128);
+        }
+
+        /*
+         * SSE is in init state
+         */
+        if (!(xstate_bv & XSTATE_SSE))
+                memset(&fx->xmm_space[0], 0, 256);
+
+        xstate_bv = (pcntxt_mask & ~xstate_bv) >> 2;
+
+        /*
+         * Update all the other memory layouts for which the corresponding
+         * header bit is in the init state.
+         */
+        while (xstate_bv) {
+                if (xstate_bv & 0x1) {
+                        int offset = xstate_offsets[feature_bit];
+                        int size = xstate_sizes[feature_bit];
+
+                        memcpy(((void *) fx) + offset,
+                               ((void *) init_xstate_buf) + offset,
+                               size);
+                }
+
+                xstate_bv >>= 1;
+                feature_bit++;
+        }
+}
+
 /*
  * Check for the presence of extended state information in the
  * user fpstate pointer in the sigcontext.
@@ -102,6 +179,7 @@ int save_i387_xstate(void __user *buf)
                 task_thread_info(tsk)->status &= ~TS_USEDFPU;
                 stts();
         } else {
+                sanitize_i387_state(tsk);
                 if (__copy_to_user(buf, &tsk->thread.fpu.state->fxsave,
                                    xstate_size))
                         return -1;
@@ -267,11 +345,6 @@ static void prepare_fx_sw_frame(void)
 #endif
 }
 
-/*
- * Represents init state for the supported extended state.
- */
-struct xsave_struct *init_xstate_buf;
-
 #ifdef CONFIG_X86_64
 unsigned int sig_xstate_size = sizeof(struct _fpstate);
 #endif
@@ -279,37 +352,77 @@ unsigned int sig_xstate_size = sizeof(struct _fpstate);
 /*
  * Enable the extended processor state save/restore feature
  */
-void __cpuinit xsave_init(void)
+static inline void xstate_enable(void)
 {
-        if (!cpu_has_xsave)
-                return;
-
         set_in_cr4(X86_CR4_OSXSAVE);
-
-        /*
-         * Enable all the features that the HW is capable of
-         * and the Linux kernel is aware of.
-         */
         xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);
 }
 
 /*
+ * Record the offsets and sizes of different state managed by the xsave
+ * memory layout.
+ */
+static void __init setup_xstate_features(void)
+{
+        int eax, ebx, ecx, edx, leaf = 0x2;
+
+        xstate_features = fls64(pcntxt_mask);
+        xstate_offsets = alloc_bootmem(xstate_features * sizeof(int));
+        xstate_sizes = alloc_bootmem(xstate_features * sizeof(int));
+
+        do {
+                cpuid_count(XSTATE_CPUID, leaf, &eax, &ebx, &ecx, &edx);
+
+                if (eax == 0)
+                        break;
+
+                xstate_offsets[leaf] = ebx;
+                xstate_sizes[leaf] = eax;
+
+                leaf++;
+        } while (1);
+}
+
+/*
  * setup the xstate image representing the init state
  */
 static void __init setup_xstate_init(void)
 {
+        setup_xstate_features();
+
+        /*
+         * Setup init_xstate_buf to represent the init state of
+         * all the features managed by the xsave
+         */
         init_xstate_buf = alloc_bootmem(xstate_size);
         init_xstate_buf->i387.mxcsr = MXCSR_DEFAULT;
+
+        clts();
+        /*
+         * Init all the feature states with the header's xstate_bv being 0x0.
+         */
+        xrstor_state(init_xstate_buf, -1);
+        /*
+         * Dump the init state again. This is to identify the init state
+         * of any feature which is not represented by all zeros.
+         */
+        xsave_state(init_xstate_buf, -1);
+        stts();
 }
 
 /*
  * Enable and initialize the xsave feature.
  */
-void __ref xsave_cntxt_init(void)
+static void __init xstate_enable_boot_cpu(void)
 {
         unsigned int eax, ebx, ecx, edx;
 
-        cpuid_count(0xd, 0, &eax, &ebx, &ecx, &edx);
+        if (boot_cpu_data.cpuid_level < XSTATE_CPUID) {
+                WARN(1, KERN_ERR "XSTATE_CPUID missing\n");
+                return;
+        }
+
+        cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
         pcntxt_mask = eax + ((u64)edx << 32);
 
         if ((pcntxt_mask & XSTATE_FPSSE) != XSTATE_FPSSE) {
@@ -322,12 +435,13 @@ void __ref xsave_cntxt_init(void)
          * Support only the state known to OS.
          */
         pcntxt_mask = pcntxt_mask & XCNTXT_MASK;
-        xsave_init();
+
+        xstate_enable();
 
         /*
          * Recompute the context size for enabled features
          */
-        cpuid_count(0xd, 0, &eax, &ebx, &ecx, &edx);
+        cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
         xstate_size = ebx;
 
         update_regset_xstate_info(xstate_size, pcntxt_mask);
@@ -339,3 +453,23 @@ void __ref xsave_cntxt_init(void)
                "cntxt size 0x%x\n",
                pcntxt_mask, xstate_size);
 }
+
+/*
+ * For the very first instance, this calls xstate_enable_boot_cpu();
+ * for all subsequent instances, this calls xstate_enable().
+ *
+ * This is somewhat obfuscated due to the lack of powerful enough
+ * overrides for the section checks.
+ */
+void __cpuinit xsave_init(void)
+{
+        static __refdata void (*next_func)(void) = xstate_enable_boot_cpu;
+        void (*this_func)(void);
+
+        if (!cpu_has_xsave)
+                return;
+
+        this_func = next_func;
+        next_func = xstate_enable;
+        this_func();
+}
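For reference, setup_xstate_features() above records exactly what CPUID leaf 0xd reports per component: sub-leaf i (for i >= 2) returns the component's size in EAX and its offset within the xsave area in EBX. The new xsave_init() keeps that enumeration on the boot CPU only, by starting its function pointer at xstate_enable_boot_cpu() and flipping it to xstate_enable() for every later CPU. Below is a user-space sketch that prints the same per-component table, assuming GCC/clang's <cpuid.h>; output naturally depends on the CPU it runs on.

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx, i;

        if (!__get_cpuid_count(0xd, 0, &eax, &ebx, &ecx, &edx)) {
                fprintf(stderr, "CPUID leaf 0xd not supported\n");
                return 1;
        }
        /* EDX:EAX = supported state mask, ECX = size covering all components */
        printf("supported xstate mask 0x%08x%08x, max area size %u bytes\n",
               edx, eax, ecx);

        for (i = 2; i < 63; i++) {
                __get_cpuid_count(0xd, i, &eax, &ebx, &ecx, &edx);
                if (eax == 0)
                        continue;       /* component not supported */
                printf("component %2u: %4u bytes at offset %u\n", i, eax, ebx);
        }
        return 0;
}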