author     Ingo Molnar <mingo@kernel.org>    2017-09-26 03:43:36 -0400
committer  Ingo Molnar <mingo@kernel.org>    2017-09-26 03:43:36 -0400
commit     e4a81bfcaae1ebbdc6efe74e8ea563144d90e9a9 (patch)
tree       7b1d916413eab7beb165eb969208930730edcfc8
parent     685c930d6e58e31e251ec354f9dca3958a4c5040 (diff)
x86/fpu: Rename fpu::fpstate_active to fpu::initialized
The x86 FPU code used to have a complex state machine where both the FPU
registers and the FPU state context could be 'active' (or inactive)
independently of each other - which enabled features like lazy FPU restore.

Much of this complexity is gone in the current code: now we basically can
have FPU-less tasks (kernel threads) that don't use (and save/restore) FPU
state at all, plus full FPU users that save/restore directly with no
laziness whatsoever.

But fpu::fpstate_active still carries bits of the old complexity - meanwhile
it has become a simple flag that shows whether the FPU context saving area
in the thread struct is initialized and used, or not.

Rename it to fpu::initialized to express this simplicity in the name as well.

Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Eric Biggers <ebiggers3@gmail.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Yu-cheng Yu <yu-cheng.yu@intel.com>
Link: http://lkml.kernel.org/r/20170923130016.21448-30-mingo@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
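[Editor's note] For orientation, here is a self-contained toy model of the
contract the renamed flag expresses. Every name below (toy_fpu, toy_activate,
toy_save) is illustrative only - this is a sketch of the simplified
bookkeeping described above, not kernel code:

	#include <stdio.h>
	#include <string.h>

	/* Toy model -- illustrative only, not kernel code. */
	struct toy_fpu {
		unsigned char initialized;  /* was: fpstate_active */
		double state[8];            /* stand-in for the real fpstate area */
	};

	/* First FPU use: initialize the save area and mark it valid. */
	static void toy_activate(struct toy_fpu *fpu)
	{
		if (!fpu->initialized) {
			memset(fpu->state, 0, sizeof(fpu->state));
			fpu->initialized = 1;
		}
	}

	/* Context switch away: save registers only if a context exists. */
	static void toy_save(struct toy_fpu *fpu, const double *regs)
	{
		if (fpu->initialized)
			memcpy(fpu->state, regs, sizeof(fpu->state));
	}

	int main(void)
	{
		struct toy_fpu fpu = { 0 };
		double regs[8] = { 3.14 };

		toy_save(&fpu, regs);   /* no-op: FPU-less, kernel-thread-like task */
		toy_activate(&fpu);
		toy_save(&fpu, regs);   /* now the context is saved */
		printf("initialized=%d state[0]=%g\n", fpu.initialized, fpu.state[0]);
		return 0;
	}

Compiled with any C compiler, this prints "initialized=1 state[0]=3.14": the
save is a no-op until the context has been initialized, which is exactly the
property the flag now names.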
-rw-r--r--  arch/x86/ia32/ia32_signal.c            2
-rw-r--r--  arch/x86/include/asm/fpu/internal.h    4
-rw-r--r--  arch/x86/include/asm/fpu/types.h       6
-rw-r--r--  arch/x86/include/asm/trace/fpu.h       8
-rw-r--r--  arch/x86/kernel/fpu/core.c            24
-rw-r--r--  arch/x86/kernel/fpu/init.c             2
-rw-r--r--  arch/x86/kernel/fpu/regset.c           6
-rw-r--r--  arch/x86/kernel/fpu/signal.c           8
-rw-r--r--  arch/x86/kernel/fpu/xstate.c           2
-rw-r--r--  arch/x86/kernel/signal.c               6
-rw-r--r--  arch/x86/mm/pkeys.c                    2
11 files changed, 35 insertions, 35 deletions
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index e0bb46c02857..0e2a5edbce00 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -231,7 +231,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
 		 ksig->ka.sa.sa_restorer)
 		sp = (unsigned long) ksig->ka.sa.sa_restorer;
 
-	if (fpu->fpstate_active) {
+	if (fpu->initialized) {
 		unsigned long fx_aligned, math_size;
 
 		sp = fpu__alloc_mathframe(sp, 1, &fx_aligned, &math_size);
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 508e4181c4af..b26ae05da18a 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -527,7 +527,7 @@ static inline void fpregs_activate(struct fpu *fpu)
 static inline void
 switch_fpu_prepare(struct fpu *old_fpu, int cpu)
 {
-	if (old_fpu->fpstate_active) {
+	if (old_fpu->initialized) {
 		if (!copy_fpregs_to_fpstate(old_fpu))
 			old_fpu->last_cpu = -1;
 		else
@@ -550,7 +550,7 @@ switch_fpu_prepare(struct fpu *old_fpu, int cpu)
 static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
 {
 	bool preload = static_cpu_has(X86_FEATURE_FPU) &&
-		       new_fpu->fpstate_active;
+		       new_fpu->initialized;
 
 	if (preload) {
 		if (!fpregs_state_valid(new_fpu, cpu))
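[Editor's note] A simplified sketch, not code from this patch:
switch_fpu_prepare() and switch_fpu_finish() are paired around the actual
task switch. Assuming the era's __switch_to() structure, the caller side
looks roughly like:

	/* Illustrative pairing only - scheduler details elided: */
	static void example_task_switch(struct fpu *prev_fpu,
					struct fpu *next_fpu, int cpu)
	{
		switch_fpu_prepare(prev_fpu, cpu);  /* save outgoing regs iff ->initialized */
		/* ... switch stacks, segment state, etc. ... */
		switch_fpu_finish(next_fpu, cpu);   /* reload incoming regs iff ->initialized */
	}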
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
index 71db45ca8870..a1520575d86b 100644
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -293,13 +293,13 @@ struct fpu {
 	unsigned int			last_cpu;
 
 	/*
-	 * @fpstate_active:
+	 * @initialized:
 	 *
-	 * This flag indicates whether this context is active: if the task
+	 * This flag indicates whether this context is initialized: if the task
 	 * is not running then we can restore from this context, if the task
 	 * is running then we should save into this context.
 	 */
-	unsigned char			fpstate_active;
+	unsigned char			initialized;
 
 	/*
 	 * @state:
diff --git a/arch/x86/include/asm/trace/fpu.h b/arch/x86/include/asm/trace/fpu.h
index da565aae9fd2..39f7a27bef13 100644
--- a/arch/x86/include/asm/trace/fpu.h
+++ b/arch/x86/include/asm/trace/fpu.h
@@ -12,22 +12,22 @@ DECLARE_EVENT_CLASS(x86_fpu,
 
 	TP_STRUCT__entry(
 		__field(struct fpu *, fpu)
-		__field(bool, fpstate_active)
+		__field(bool, initialized)
 		__field(u64, xfeatures)
 		__field(u64, xcomp_bv)
 		),
 
 	TP_fast_assign(
 		__entry->fpu		= fpu;
-		__entry->fpstate_active	= fpu->fpstate_active;
+		__entry->initialized	= fpu->initialized;
 		if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
 			__entry->xfeatures = fpu->state.xsave.header.xfeatures;
 			__entry->xcomp_bv  = fpu->state.xsave.header.xcomp_bv;
 		}
 	),
-	TP_printk("x86/fpu: %p fpstate_active: %d xfeatures: %llx xcomp_bv: %llx",
+	TP_printk("x86/fpu: %p initialized: %d xfeatures: %llx xcomp_bv: %llx",
 			__entry->fpu,
-			__entry->fpstate_active,
+			__entry->initialized,
 			__entry->xfeatures,
 			__entry->xcomp_bv
 	)
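[Editor's note] As context for the rename above: an event class like x86_fpu
only declares the record layout; concrete tracepoints are instantiated with
DEFINE_EVENT. A representative instantiation, following the kernel's standard
DEFINE_EVENT pattern (the header's actual event list is not shown in this
hunk), would be:

	DEFINE_EVENT(x86_fpu, x86_fpu_before_save,
		TP_PROTO(struct fpu *fpu),
		TP_ARGS(fpu)
	);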
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index b2cdeb3b1860..c8d6032f04d0 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -100,7 +100,7 @@ void __kernel_fpu_begin(void)
 
 	kernel_fpu_disable();
 
-	if (fpu->fpstate_active) {
+	if (fpu->initialized) {
 		/*
 		 * Ignore return value -- we don't care if reg state
 		 * is clobbered.
@@ -116,7 +116,7 @@ void __kernel_fpu_end(void)
 {
 	struct fpu *fpu = &current->thread.fpu;
 
-	if (fpu->fpstate_active)
+	if (fpu->initialized)
 		copy_kernel_to_fpregs(&fpu->state);
 
 	kernel_fpu_enable();
@@ -148,7 +148,7 @@ void fpu__save(struct fpu *fpu)
 
 	preempt_disable();
 	trace_x86_fpu_before_save(fpu);
-	if (fpu->fpstate_active) {
+	if (fpu->initialized) {
 		if (!copy_fpregs_to_fpstate(fpu)) {
 			copy_kernel_to_fpregs(&fpu->state);
 		}
@@ -191,7 +191,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 {
 	dst_fpu->last_cpu = -1;
 
-	if (!src_fpu->fpstate_active || !static_cpu_has(X86_FEATURE_FPU))
+	if (!src_fpu->initialized || !static_cpu_has(X86_FEATURE_FPU))
 		return 0;
 
 	WARN_ON_FPU(src_fpu != &current->thread.fpu);
@@ -240,13 +240,13 @@ void fpu__activate_curr(struct fpu *fpu)
 {
 	WARN_ON_FPU(fpu != &current->thread.fpu);
 
-	if (!fpu->fpstate_active) {
+	if (!fpu->initialized) {
 		fpstate_init(&fpu->state);
 		trace_x86_fpu_init_state(fpu);
 
 		trace_x86_fpu_activate_state(fpu);
 		/* Safe to do for the current task: */
-		fpu->fpstate_active = 1;
+		fpu->initialized = 1;
 	}
 }
 EXPORT_SYMBOL_GPL(fpu__activate_curr);
@@ -271,13 +271,13 @@ void fpu__activate_fpstate_read(struct fpu *fpu)
 	if (fpu == &current->thread.fpu) {
 		fpu__save(fpu);
 	} else {
-		if (!fpu->fpstate_active) {
+		if (!fpu->initialized) {
 			fpstate_init(&fpu->state);
 			trace_x86_fpu_init_state(fpu);
 
 			trace_x86_fpu_activate_state(fpu);
 			/* Safe to do for current and for stopped child tasks: */
-			fpu->fpstate_active = 1;
+			fpu->initialized = 1;
 		}
 	}
 }
@@ -303,7 +303,7 @@ void fpu__activate_fpstate_write(struct fpu *fpu)
 	 */
 	WARN_ON_FPU(fpu == &current->thread.fpu);
 
-	if (fpu->fpstate_active) {
+	if (fpu->initialized) {
 		/* Invalidate any lazy state: */
 		__fpu_invalidate_fpregs_state(fpu);
 	} else {
@@ -312,7 +312,7 @@ void fpu__activate_fpstate_write(struct fpu *fpu)
 
 		trace_x86_fpu_activate_state(fpu);
 		/* Safe to do for stopped child tasks: */
-		fpu->fpstate_active = 1;
+		fpu->initialized = 1;
 	}
 }
 
@@ -354,7 +354,7 @@ void fpu__drop(struct fpu *fpu)
 	preempt_disable();
 
 	if (fpu == &current->thread.fpu) {
-		if (fpu->fpstate_active) {
+		if (fpu->initialized) {
 			/* Ignore delayed exceptions from user space */
 			asm volatile("1: fwait\n"
 				     "2:\n"
@@ -363,7 +363,7 @@ void fpu__drop(struct fpu *fpu)
 		}
 	}
 
-	fpu->fpstate_active = 0;
+	fpu->initialized = 0;
 
 	trace_x86_fpu_dropped(fpu);
 
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index d5d44c452624..7affb7e3d9a5 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -240,7 +240,7 @@ static void __init fpu__init_system_ctx_switch(void)
 	WARN_ON_FPU(!on_boot_cpu);
 	on_boot_cpu = 0;
 
-	WARN_ON_FPU(current->thread.fpu.fpstate_active);
+	WARN_ON_FPU(current->thread.fpu.initialized);
 }
 
 /*
diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c
index c764f7405322..19e82334e811 100644
--- a/arch/x86/kernel/fpu/regset.c
+++ b/arch/x86/kernel/fpu/regset.c
@@ -16,14 +16,14 @@ int regset_fpregs_active(struct task_struct *target, const struct user_regset *r
 {
 	struct fpu *target_fpu = &target->thread.fpu;
 
-	return target_fpu->fpstate_active ? regset->n : 0;
+	return target_fpu->initialized ? regset->n : 0;
 }
 
 int regset_xregset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
 {
 	struct fpu *target_fpu = &target->thread.fpu;
 
-	if (boot_cpu_has(X86_FEATURE_FXSR) && target_fpu->fpstate_active)
+	if (boot_cpu_has(X86_FEATURE_FXSR) && target_fpu->initialized)
 		return regset->n;
 	else
 		return 0;
@@ -380,7 +380,7 @@ int dump_fpu(struct pt_regs *regs, struct user_i387_struct *ufpu)
 	struct fpu *fpu = &tsk->thread.fpu;
 	int fpvalid;
 
-	fpvalid = fpu->fpstate_active;
+	fpvalid = fpu->initialized;
 	if (fpvalid)
 		fpvalid = !fpregs_get(tsk, NULL,
 				      0, sizeof(struct user_i387_ia32_struct),
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index da68ea1c3a44..ab2dd24cfea4 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -171,7 +171,7 @@ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
 			sizeof(struct user_i387_ia32_struct), NULL,
 			(struct _fpstate_32 __user *) buf) ? -1 : 1;
 
-	if (fpu->fpstate_active || using_compacted_format()) {
+	if (fpu->initialized || using_compacted_format()) {
 		/* Save the live register state to the user directly. */
 		if (copy_fpregs_to_sigframe(buf_fx))
 			return -1;
@@ -315,12 +315,12 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
 	int err = 0;
 
 	/*
-	 * Drop the current fpu which clears fpu->fpstate_active. This ensures
+	 * Drop the current fpu which clears fpu->initialized. This ensures
 	 * that any context-switch during the copy of the new state,
 	 * avoids the intermediate state from getting restored/saved.
 	 * Thus avoiding the new restored state from getting corrupted.
 	 * We will be ready to restore/save the state only after
-	 * fpu->fpstate_active is again set.
+	 * fpu->initialized is again set.
 	 */
 	fpu__drop(fpu);
 
@@ -342,7 +342,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
 		sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
 	}
 
-	fpu->fpstate_active = 1;
+	fpu->initialized = 1;
 	preempt_disable();
 	fpu__restore(fpu);
 	preempt_enable();
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index fda1109cc355..703e76d027ee 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -867,7 +867,7 @@ const void *get_xsave_field_ptr(int xsave_state)
 {
 	struct fpu *fpu = &current->thread.fpu;
 
-	if (!fpu->fpstate_active)
+	if (!fpu->initialized)
 		return NULL;
 	/*
 	 * fpu__save() takes the CPU's xstate registers
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index e04442345fc0..4e188fda5961 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -263,7 +263,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
 		sp = (unsigned long) ka->sa.sa_restorer;
 	}
 
-	if (fpu->fpstate_active) {
+	if (fpu->initialized) {
 		sp = fpu__alloc_mathframe(sp, IS_ENABLED(CONFIG_X86_32),
 					  &buf_fx, &math_size);
 		*fpstate = (void __user *)sp;
@@ -279,7 +279,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
 		return (void __user *)-1L;
 
 	/* save i387 and extended state */
-	if (fpu->fpstate_active &&
+	if (fpu->initialized &&
 	    copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size) < 0)
 		return (void __user *)-1L;
 
@@ -755,7 +755,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 		/*
 		 * Ensure the signal handler starts with the new fpu state.
 		 */
-		if (fpu->fpstate_active)
+		if (fpu->initialized)
 			fpu__clear(fpu);
 	}
 	signal_setup_done(failed, ksig, stepping);
diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c
index 4d24269c071f..d7bc0eea20a5 100644
--- a/arch/x86/mm/pkeys.c
+++ b/arch/x86/mm/pkeys.c
@@ -44,7 +44,7 @@ int __execute_only_pkey(struct mm_struct *mm)
 	 */
 	preempt_disable();
 	if (!need_to_set_mm_pkey &&
-	    current->thread.fpu.fpstate_active &&
+	    current->thread.fpu.initialized &&
 	    !__pkru_allows_read(read_pkru(), execute_only_pkey)) {
 		preempt_enable();
 		return execute_only_pkey;