author		Ingo Molnar <mingo@kernel.org>	2017-09-26 04:17:43 -0400
committer	Ingo Molnar <mingo@kernel.org>	2017-09-26 04:17:43 -0400
commit		8474c532b558fb6754c0ddff42a39ed6636671c9 (patch)
tree		2de070143c26d6d70e06f576e37ae0255d6ace35
parent		e365806ac289457263a133bd32df8df49897f612 (diff)
parent		738f48cb5fdd5878d11934f1898aa2bcf1578289 (diff)
Merge branch 'WIP.x86/fpu' into x86/fpu, because it's ready
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  arch/x86/ia32/ia32_signal.c          |   2
-rw-r--r--  arch/x86/include/asm/fpu/internal.h  |  90
-rw-r--r--  arch/x86/include/asm/fpu/types.h     |  32
-rw-r--r--  arch/x86/include/asm/fpu/xstate.h    |  12
-rw-r--r--  arch/x86/include/asm/trace/fpu.h     |  11
-rw-r--r--  arch/x86/kernel/fpu/core.c           | 155
-rw-r--r--  arch/x86/kernel/fpu/init.c           |   2
-rw-r--r--  arch/x86/kernel/fpu/regset.c         |  48
-rw-r--r--  arch/x86/kernel/fpu/signal.c         |  37
-rw-r--r--  arch/x86/kernel/fpu/xstate.c         | 264
-rw-r--r--  arch/x86/kernel/signal.c             |   6
-rw-r--r--  arch/x86/kvm/x86.c                   |   2
-rw-r--r--  arch/x86/math-emu/fpu_entry.c        |   2
-rw-r--r--  arch/x86/mm/extable.c                |  24
-rw-r--r--  arch/x86/mm/pkeys.c                  |   3
15 files changed, 375 insertions(+), 315 deletions(-)
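
A note for readers: the dominant change in this merge is a rename of the FPU
state-tracking API, visible across every hunk below. A rough old-to-new
mapping, reconstructed from the diff itself as a reading aid (not part of the
commit):

	fpu->fpstate_active            -> fpu->initialized
	fpu->fpregs_active             -> removed (fpu_fpregs_owner_ctx plus
	                                  fpu->initialized now cover this state)
	fpu__activate_curr()           -> fpu__initialize()
	fpu__activate_fpstate_read()   -> fpu__prepare_read()
	fpu__activate_fpstate_write()  -> fpu__prepare_write()
	fpu__current_fpstate_write_*() -> removed, no direct replacement
	check_insn()                   -> kernel_insn(), faults routed to
	                                  ex_handler_fprestore()
	copyout_from_xsaves()          -> copy_xstate_to_kernel() / copy_xstate_to_user()
	copyin_to_xsaves()             -> copy_kernel_to_xstate() / copy_user_to_xstate()
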
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index e0bb46c02857..0e2a5edbce00 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -231,7 +231,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
 		 ksig->ka.sa.sa_restorer)
 		sp = (unsigned long) ksig->ka.sa.sa_restorer;
 
-	if (fpu->fpstate_active) {
+	if (fpu->initialized) {
 		unsigned long fx_aligned, math_size;
 
 		sp = fpu__alloc_mathframe(sp, 1, &fx_aligned, &math_size);
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 554cdb205d17..e3221ffa304e 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -23,11 +23,9 @@
 /*
  * High level FPU state handling functions:
  */
-extern void fpu__activate_curr(struct fpu *fpu);
-extern void fpu__activate_fpstate_read(struct fpu *fpu);
-extern void fpu__activate_fpstate_write(struct fpu *fpu);
-extern void fpu__current_fpstate_write_begin(void);
-extern void fpu__current_fpstate_write_end(void);
+extern void fpu__initialize(struct fpu *fpu);
+extern void fpu__prepare_read(struct fpu *fpu);
+extern void fpu__prepare_write(struct fpu *fpu);
 extern void fpu__save(struct fpu *fpu);
 extern void fpu__restore(struct fpu *fpu);
 extern int fpu__restore_sig(void __user *buf, int ia32_frame);
@@ -120,20 +118,11 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu);
 	err;								\
 })
 
-#define check_insn(insn, output, input...)				\
-({									\
-	int err;							\
+#define kernel_insn(insn, output, input...)				\
 	asm volatile("1:" #insn "\n\t"					\
 		     "2:\n"						\
-		     ".section .fixup,\"ax\"\n"				\
-		     "3:  movl $-1,%[err]\n"				\
-		     "    jmp  2b\n"					\
-		     ".previous\n"					\
-		     _ASM_EXTABLE(1b, 3b)				\
-		     : [err] "=r" (err), output				\
-		     : "0"(0), input);					\
-	err;								\
-})
+		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_fprestore)	\
+		     : output : input)
 
 static inline int copy_fregs_to_user(struct fregs_state __user *fx)
 {
@@ -153,20 +142,16 @@ static inline int copy_fxregs_to_user(struct fxregs_state __user *fx)
 
 static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
 {
-	int err;
-
 	if (IS_ENABLED(CONFIG_X86_32)) {
-		err = check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+		kernel_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
 	} else {
 		if (IS_ENABLED(CONFIG_AS_FXSAVEQ)) {
-			err = check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
+			kernel_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
 		} else {
 			/* See comment in copy_fxregs_to_kernel() below. */
-			err = check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx));
+			kernel_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx));
 		}
 	}
-	/* Copying from a kernel buffer to FPU registers should never fail: */
-	WARN_ON_FPU(err);
 }
 
 static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
@@ -183,9 +168,7 @@ static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
 
 static inline void copy_kernel_to_fregs(struct fregs_state *fx)
 {
-	int err = check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
-
-	WARN_ON_FPU(err);
+	kernel_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
 }
 
 static inline int copy_user_to_fregs(struct fregs_state __user *fx)
@@ -281,18 +264,13 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
  * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact
  * XSAVE area format.
  */
-#define XSTATE_XRESTORE(st, lmask, hmask, err)				\
+#define XSTATE_XRESTORE(st, lmask, hmask)				\
 	asm volatile(ALTERNATIVE(XRSTOR,				\
 				 XRSTORS, X86_FEATURE_XSAVES)		\
 		     "\n"						\
-		     "xor %[err], %[err]\n"				\
 		     "3:\n"						\
-		     ".pushsection .fixup,\"ax\"\n"			\
-		     "4: movl $-2, %[err]\n"				\
-		     "jmp 3b\n"						\
-		     ".popsection\n"					\
-		     _ASM_EXTABLE(661b, 4b)				\
-		     : [err] "=r" (err)					\
+		     _ASM_EXTABLE_HANDLE(661b, 3b, ex_handler_fprestore)\
+		     :							\
 		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
 		     : "memory")
 
@@ -336,7 +314,10 @@ static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
 	else
 		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
 
-	/* We should never fault when copying from a kernel buffer: */
+	/*
+	 * We should never fault when copying from a kernel buffer, and the FPU
+	 * state we set at boot time should be valid.
+	 */
 	WARN_ON_FPU(err);
 }
 
@@ -350,7 +331,7 @@ static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
 	u32 hmask = mask >> 32;
 	int err;
 
-	WARN_ON(!alternatives_patched);
+	WARN_ON_FPU(!alternatives_patched);
 
 	XSTATE_XSAVE(xstate, lmask, hmask, err);
 
@@ -365,12 +346,8 @@ static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
 {
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
-	int err;
-
-	XSTATE_XRESTORE(xstate, lmask, hmask, err);
 
-	/* We should never fault when copying from a kernel buffer: */
-	WARN_ON_FPU(err);
+	XSTATE_XRESTORE(xstate, lmask, hmask);
 }
 
 /*
@@ -526,38 +503,17 @@ static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu)
  */
 static inline void fpregs_deactivate(struct fpu *fpu)
 {
-	WARN_ON_FPU(!fpu->fpregs_active);
-
-	fpu->fpregs_active = 0;
 	this_cpu_write(fpu_fpregs_owner_ctx, NULL);
 	trace_x86_fpu_regs_deactivated(fpu);
 }
 
 static inline void fpregs_activate(struct fpu *fpu)
 {
-	WARN_ON_FPU(fpu->fpregs_active);
-
-	fpu->fpregs_active = 1;
 	this_cpu_write(fpu_fpregs_owner_ctx, fpu);
 	trace_x86_fpu_regs_activated(fpu);
 }
 
 /*
- * The question "does this thread have fpu access?"
- * is slightly racy, since preemption could come in
- * and revoke it immediately after the test.
- *
- * However, even in that very unlikely scenario,
- * we can just assume we have FPU access - typically
- * to save the FP state - we'll just take a #NM
- * fault and get the FPU access back.
- */
-static inline int fpregs_active(void)
-{
-	return current->thread.fpu.fpregs_active;
-}
-
-/*
  * FPU state switching for scheduling.
  *
  * This is a two-stage process:
@@ -571,14 +527,13 @@ static inline int fpregs_active(void)
 static inline void
 switch_fpu_prepare(struct fpu *old_fpu, int cpu)
 {
-	if (old_fpu->fpregs_active) {
+	if (old_fpu->initialized) {
 		if (!copy_fpregs_to_fpstate(old_fpu))
 			old_fpu->last_cpu = -1;
 		else
 			old_fpu->last_cpu = cpu;
 
 		/* But leave fpu_fpregs_owner_ctx! */
-		old_fpu->fpregs_active = 0;
 		trace_x86_fpu_regs_deactivated(old_fpu);
 	} else
 		old_fpu->last_cpu = -1;
@@ -595,7 +550,7 @@ switch_fpu_prepare(struct fpu *old_fpu, int cpu)
 static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
 {
 	bool preload = static_cpu_has(X86_FEATURE_FPU) &&
-		       new_fpu->fpstate_active;
+		       new_fpu->initialized;
 
 	if (preload) {
 		if (!fpregs_state_valid(new_fpu, cpu))
@@ -617,8 +572,7 @@ static inline void user_fpu_begin(void)
 	struct fpu *fpu = &current->thread.fpu;
 
 	preempt_disable();
-	if (!fpregs_active())
-		fpregs_activate(fpu);
+	fpregs_activate(fpu);
 	preempt_enable();
 }
 
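One consequence of the kernel_insn()/XSTATE_XRESTORE changes above: restores
from kernel memory no longer report an error code to the caller. A faulting
FXRSTOR/FRSTOR/XRSTOR is instead caught via the exception table and routed to
ex_handler_fprestore() (added in arch/x86/mm/extable.c further down). A
minimal sketch of the resulting calling convention; example_restore() is a
hypothetical caller, not from the commit:

	/* Sketch: callers no longer check for failure. */
	static void example_restore(struct fpu *fpu)
	{
		/*
		 * If fpu->state were corrupt, the faulting instruction would
		 * be fixed up by ex_handler_fprestore(), which warns and
		 * reloads the init state instead of leaking stale registers.
		 */
		copy_kernel_to_fxregs(&fpu->state.fxsave);
	}
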
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
index 3c80f5b9c09d..a1520575d86b 100644
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -68,6 +68,9 @@ struct fxregs_state {
 /* Default value for fxregs_state.mxcsr: */
 #define MXCSR_DEFAULT		0x1f80
 
+/* Copy both mxcsr & mxcsr_flags with a single u64 memcpy: */
+#define MXCSR_AND_FLAGS_SIZE sizeof(u64)
+
 /*
  * Software based FPU emulation state. This is arbitrary really,
  * it matches the x87 format to make it easier to understand:
@@ -290,36 +293,13 @@ struct fpu {
 	unsigned int			last_cpu;
 
 	/*
-	 * @fpstate_active:
+	 * @initialized:
 	 *
-	 * This flag indicates whether this context is active: if the task
+	 * This flag indicates whether this context is initialized: if the task
 	 * is not running then we can restore from this context, if the task
 	 * is running then we should save into this context.
 	 */
-	unsigned char			fpstate_active;
-
-	/*
-	 * @fpregs_active:
-	 *
-	 * This flag determines whether a given context is actively
-	 * loaded into the FPU's registers and that those registers
-	 * represent the task's current FPU state.
-	 *
-	 * Note the interaction with fpstate_active:
-	 *
-	 *   # task does not use the FPU:
-	 *   fpstate_active == 0
-	 *
-	 *   # task uses the FPU and regs are active:
-	 *   fpstate_active == 1 && fpregs_active == 1
-	 *
-	 *   # the regs are inactive but still match fpstate:
-	 *   fpstate_active == 1 && fpregs_active == 0 && fpregs_owner == fpu
-	 *
-	 * The third state is what we use for the lazy restore optimization
-	 * on lazy-switching CPUs.
-	 */
-	unsigned char			fpregs_active;
+	unsigned char			initialized;
 
 	/*
 	 * @state:
diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/xstate.h
index 1b2799e0699a..83fee2469eb7 100644
--- a/arch/x86/include/asm/fpu/xstate.h
+++ b/arch/x86/include/asm/fpu/xstate.h
@@ -48,8 +48,12 @@ void fpu__xstate_clear_all_cpu_caps(void);
 void *get_xsave_addr(struct xregs_state *xsave, int xstate);
 const void *get_xsave_field_ptr(int xstate_field);
 int using_compacted_format(void);
-int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf,
-			void __user *ubuf, struct xregs_state *xsave);
-int copyin_to_xsaves(const void *kbuf, const void __user *ubuf,
-		     struct xregs_state *xsave);
+int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset, unsigned int size);
+int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned int offset, unsigned int size);
+int copy_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf);
+int copy_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf);
+
+/* Validate an xstate header supplied by userspace (ptrace or sigreturn) */
+extern int validate_xstate_header(const struct xstate_header *hdr);
+
 #endif
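
The single kbuf/ubuf entry points are split four ways by direction and address
space. A sketch of how a caller selects between them, mirroring the
xstateregs_get()/xstateregs_set() hunks below (the wrapper name is
illustrative, not from the commit):

	/* Sketch: dispatch by buffer type, as the regset code does. */
	static int example_xstate_get(void *kbuf, void __user *ubuf,
				      struct xregs_state *xsave,
				      unsigned int pos, unsigned int count)
	{
		if (kbuf)
			return copy_xstate_to_kernel(kbuf, xsave, pos, count);
		else
			return copy_xstate_to_user(ubuf, xsave, pos, count);
	}
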
diff --git a/arch/x86/include/asm/trace/fpu.h b/arch/x86/include/asm/trace/fpu.h
index 342e59789fcd..39f7a27bef13 100644
--- a/arch/x86/include/asm/trace/fpu.h
+++ b/arch/x86/include/asm/trace/fpu.h
@@ -12,25 +12,22 @@ DECLARE_EVENT_CLASS(x86_fpu,
 
 	TP_STRUCT__entry(
 		__field(struct fpu *, fpu)
-		__field(bool, fpregs_active)
-		__field(bool, fpstate_active)
+		__field(bool, initialized)
 		__field(u64, xfeatures)
 		__field(u64, xcomp_bv)
 		),
 
 	TP_fast_assign(
 		__entry->fpu		= fpu;
-		__entry->fpregs_active	= fpu->fpregs_active;
-		__entry->fpstate_active	= fpu->fpstate_active;
+		__entry->initialized	= fpu->initialized;
 		if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
 			__entry->xfeatures = fpu->state.xsave.header.xfeatures;
 			__entry->xcomp_bv  = fpu->state.xsave.header.xcomp_bv;
 		}
 	),
-	TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d xfeatures: %llx xcomp_bv: %llx",
+	TP_printk("x86/fpu: %p initialized: %d xfeatures: %llx xcomp_bv: %llx",
 			__entry->fpu,
-			__entry->fpregs_active,
-			__entry->fpstate_active,
+			__entry->initialized,
 			__entry->xfeatures,
 			__entry->xcomp_bv
 	)
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index e1114f070c2d..f92a6593de1e 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -100,7 +100,7 @@ void __kernel_fpu_begin(void)
 
 	kernel_fpu_disable();
 
-	if (fpu->fpregs_active) {
+	if (fpu->initialized) {
 		/*
 		 * Ignore return value -- we don't care if reg state
 		 * is clobbered.
@@ -116,7 +116,7 @@ void __kernel_fpu_end(void)
 {
 	struct fpu *fpu = &current->thread.fpu;
 
-	if (fpu->fpregs_active)
+	if (fpu->initialized)
 		copy_kernel_to_fpregs(&fpu->state);
 
 	kernel_fpu_enable();
@@ -148,7 +148,7 @@ void fpu__save(struct fpu *fpu)
 
 	preempt_disable();
 	trace_x86_fpu_before_save(fpu);
-	if (fpu->fpregs_active) {
+	if (fpu->initialized) {
 		if (!copy_fpregs_to_fpstate(fpu)) {
 			copy_kernel_to_fpregs(&fpu->state);
 		}
@@ -189,10 +189,9 @@ EXPORT_SYMBOL_GPL(fpstate_init);
 
 int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 {
-	dst_fpu->fpregs_active = 0;
 	dst_fpu->last_cpu = -1;
 
-	if (!src_fpu->fpstate_active || !static_cpu_has(X86_FEATURE_FPU))
+	if (!src_fpu->initialized || !static_cpu_has(X86_FEATURE_FPU))
 		return 0;
 
 	WARN_ON_FPU(src_fpu != &current->thread.fpu);
@@ -206,26 +205,14 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 	/*
 	 * Save current FPU registers directly into the child
 	 * FPU context, without any memory-to-memory copying.
-	 * In lazy mode, if the FPU context isn't loaded into
-	 * fpregs, CR0.TS will be set and do_device_not_available
-	 * will load the FPU context.
 	 *
-	 * We have to do all this with preemption disabled,
-	 * mostly because of the FNSAVE case, because in that
-	 * case we must not allow preemption in the window
-	 * between the FNSAVE and us marking the context lazy.
-	 *
-	 * It shouldn't be an issue as even FNSAVE is plenty
-	 * fast in terms of critical section length.
+	 * ( The function 'fails' in the FNSAVE case, which destroys
+	 *   register contents so we have to copy them back. )
 	 */
-	preempt_disable();
 	if (!copy_fpregs_to_fpstate(dst_fpu)) {
-		memcpy(&src_fpu->state, &dst_fpu->state,
-		       fpu_kernel_xstate_size);
-
+		memcpy(&src_fpu->state, &dst_fpu->state, fpu_kernel_xstate_size);
 		copy_kernel_to_fpregs(&src_fpu->state);
 	}
-	preempt_enable();
 
 	trace_x86_fpu_copy_src(src_fpu);
 	trace_x86_fpu_copy_dst(dst_fpu);
@@ -237,45 +224,48 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 * Activate the current task's in-memory FPU context,
 * if it has not been used before:
 */
-void fpu__activate_curr(struct fpu *fpu)
+void fpu__initialize(struct fpu *fpu)
 {
 	WARN_ON_FPU(fpu != &current->thread.fpu);
 
-	if (!fpu->fpstate_active) {
+	if (!fpu->initialized) {
 		fpstate_init(&fpu->state);
 		trace_x86_fpu_init_state(fpu);
 
 		trace_x86_fpu_activate_state(fpu);
 		/* Safe to do for the current task: */
-		fpu->fpstate_active = 1;
+		fpu->initialized = 1;
 	}
 }
-EXPORT_SYMBOL_GPL(fpu__activate_curr);
+EXPORT_SYMBOL_GPL(fpu__initialize);
 
 /*
 * This function must be called before we read a task's fpstate.
 *
- * If the task has not used the FPU before then initialize its
- * fpstate.
+ * There's two cases where this gets called:
+ *
+ * - for the current task (when coredumping), in which case we have
+ *   to save the latest FPU registers into the fpstate,
+ *
+ * - or it's called for stopped tasks (ptrace), in which case the
+ *   registers were already saved by the context-switch code when
+ *   the task scheduled out - we only have to initialize the registers
+ *   if they've never been initialized.
 *
 * If the task has used the FPU before then save it.
 */
-void fpu__activate_fpstate_read(struct fpu *fpu)
+void fpu__prepare_read(struct fpu *fpu)
 {
-	/*
-	 * If fpregs are active (in the current CPU), then
-	 * copy them to the fpstate:
-	 */
-	if (fpu->fpregs_active) {
+	if (fpu == &current->thread.fpu) {
 		fpu__save(fpu);
 	} else {
-		if (!fpu->fpstate_active) {
+		if (!fpu->initialized) {
 			fpstate_init(&fpu->state);
 			trace_x86_fpu_init_state(fpu);
 
 			trace_x86_fpu_activate_state(fpu);
 			/* Safe to do for current and for stopped child tasks: */
-			fpu->fpstate_active = 1;
+			fpu->initialized = 1;
 		}
 	}
 }
@@ -283,17 +273,17 @@ void fpu__activate_fpstate_read(struct fpu *fpu)
 /*
 * This function must be called before we write a task's fpstate.
 *
- * If the task has used the FPU before then unlazy it.
+ * If the task has used the FPU before then invalidate any cached FPU registers.
 * If the task has not used the FPU before then initialize its fpstate.
 *
 * After this function call, after registers in the fpstate are
 * modified and the child task has woken up, the child task will
 * restore the modified FPU state from the modified context. If we
- * didn't clear its lazy status here then the lazy in-registers
+ * didn't clear its cached status here then the cached in-registers
 * state pending on its former CPU could be restored, corrupting
 * the modifications.
 */
-void fpu__activate_fpstate_write(struct fpu *fpu)
+void fpu__prepare_write(struct fpu *fpu)
 {
 	/*
 	 * Only stopped child tasks can be used to modify the FPU
@@ -301,8 +291,8 @@ void fpu__activate_fpstate_write(struct fpu *fpu)
 	 */
 	WARN_ON_FPU(fpu == &current->thread.fpu);
 
-	if (fpu->fpstate_active) {
-		/* Invalidate any lazy state: */
+	if (fpu->initialized) {
+		/* Invalidate any cached state: */
 		__fpu_invalidate_fpregs_state(fpu);
 	} else {
 		fpstate_init(&fpu->state);
@@ -310,74 +300,11 @@ void fpu__activate_fpstate_write(struct fpu *fpu)
 
 		trace_x86_fpu_activate_state(fpu);
 		/* Safe to do for stopped child tasks: */
-		fpu->fpstate_active = 1;
+		fpu->initialized = 1;
 	}
 }
 
 /*
- * This function must be called before we write the current
- * task's fpstate.
- *
- * This call gets the current FPU register state and moves
- * it in to the 'fpstate'. Preemption is disabled so that
- * no writes to the 'fpstate' can occur from context
- * swiches.
- *
- * Must be followed by a fpu__current_fpstate_write_end().
- */
-void fpu__current_fpstate_write_begin(void)
-{
-	struct fpu *fpu = &current->thread.fpu;
-
-	/*
-	 * Ensure that the context-switching code does not write
-	 * over the fpstate while we are doing our update.
-	 */
-	preempt_disable();
-
-	/*
-	 * Move the fpregs in to the fpu's 'fpstate'.
-	 */
-	fpu__activate_fpstate_read(fpu);
-
-	/*
-	 * The caller is about to write to 'fpu'. Ensure that no
-	 * CPU thinks that its fpregs match the fpstate. This
-	 * ensures we will not be lazy and skip a XRSTOR in the
-	 * future.
-	 */
-	__fpu_invalidate_fpregs_state(fpu);
-}
-
-/*
- * This function must be paired with fpu__current_fpstate_write_begin()
- *
- * This will ensure that the modified fpstate gets placed back in
- * the fpregs if necessary.
- *
- * Note: This function may be called whether or not an _actual_
- * write to the fpstate occurred.
- */
-void fpu__current_fpstate_write_end(void)
-{
-	struct fpu *fpu = &current->thread.fpu;
-
-	/*
-	 * 'fpu' now has an updated copy of the state, but the
-	 * registers may still be out of date. Update them with
-	 * an XRSTOR if they are active.
-	 */
-	if (fpregs_active())
-		copy_kernel_to_fpregs(&fpu->state);
-
-	/*
-	 * Our update is done and the fpregs/fpstate are in sync
-	 * if necessary. Context switches can happen again.
-	 */
-	preempt_enable();
-}
-
-/*
 * 'fpu__restore()' is called to copy FPU registers from
 * the FPU fpstate to the live hw registers and to activate
 * access to the hardware registers, so that FPU instructions
@@ -389,7 +316,7 @@ void fpu__current_fpstate_write_end(void)
 */
 void fpu__restore(struct fpu *fpu)
 {
-	fpu__activate_curr(fpu);
+	fpu__initialize(fpu);
 
 	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
 	kernel_fpu_disable();
@@ -414,15 +341,17 @@ void fpu__drop(struct fpu *fpu)
 {
 	preempt_disable();
 
-	if (fpu->fpregs_active) {
-		/* Ignore delayed exceptions from user space */
-		asm volatile("1: fwait\n"
-			     "2:\n"
-			     _ASM_EXTABLE(1b, 2b));
-		fpregs_deactivate(fpu);
+	if (fpu == &current->thread.fpu) {
+		if (fpu->initialized) {
+			/* Ignore delayed exceptions from user space */
+			asm volatile("1: fwait\n"
+				     "2:\n"
+				     _ASM_EXTABLE(1b, 2b));
+			fpregs_deactivate(fpu);
+		}
 	}
 
-	fpu->fpstate_active = 0;
+	fpu->initialized = 0;
 
 	trace_x86_fpu_dropped(fpu);
 
@@ -462,9 +391,11 @@ void fpu__clear(struct fpu *fpu)
 	 * Make sure fpstate is cleared and initialized.
 	 */
 	if (static_cpu_has(X86_FEATURE_FPU)) {
-		fpu__activate_curr(fpu);
+		preempt_disable();
+		fpu__initialize(fpu);
 		user_fpu_begin();
 		copy_init_fpstate_to_fpregs();
+		preempt_enable();
 	}
 }
 
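With fpu__current_fpstate_write_begin()/end() gone, the remaining discipline
for touching a task's fpstate is the prepare_read/prepare_write pair:
fpu__prepare_read() saves or initializes state before it is read, and
fpu__prepare_write() initializes it and invalidates any copy cached in
registers before it is modified (stopped child tasks only). A condensed
sketch of the write-side pattern, assuming 'target' is ptrace-stopped and
FXSR state; the helper name is hypothetical:

	/* Sketch: poking a stopped child's MXCSR (cf. regset.c below). */
	static void example_poke_mxcsr(struct task_struct *target, u32 mxcsr)
	{
		struct fpu *fpu = &target->thread.fpu;

		fpu__prepare_write(fpu);	/* init + invalidate cached regs */
		fpu->state.fxsave.mxcsr = mxcsr & mxcsr_feature_mask;
		/* The child restores the modified fpstate when rescheduled. */
	}
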
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index d5d44c452624..7affb7e3d9a5 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -240,7 +240,7 @@ static void __init fpu__init_system_ctx_switch(void)
 	WARN_ON_FPU(!on_boot_cpu);
 	on_boot_cpu = 0;
 
-	WARN_ON_FPU(current->thread.fpu.fpstate_active);
+	WARN_ON_FPU(current->thread.fpu.initialized);
 }
 
 /*
diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c
index b188b16841e3..3ea151372389 100644
--- a/arch/x86/kernel/fpu/regset.c
+++ b/arch/x86/kernel/fpu/regset.c
@@ -16,14 +16,14 @@ int regset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
 {
 	struct fpu *target_fpu = &target->thread.fpu;
 
-	return target_fpu->fpstate_active ? regset->n : 0;
+	return target_fpu->initialized ? regset->n : 0;
 }
 
 int regset_xregset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
 {
 	struct fpu *target_fpu = &target->thread.fpu;
 
-	if (boot_cpu_has(X86_FEATURE_FXSR) && target_fpu->fpstate_active)
+	if (boot_cpu_has(X86_FEATURE_FXSR) && target_fpu->initialized)
 		return regset->n;
 	else
 		return 0;
@@ -38,7 +38,7 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
 	if (!boot_cpu_has(X86_FEATURE_FXSR))
 		return -ENODEV;
 
-	fpu__activate_fpstate_read(fpu);
+	fpu__prepare_read(fpu);
 	fpstate_sanitize_xstate(fpu);
 
 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
@@ -55,7 +55,7 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
 	if (!boot_cpu_has(X86_FEATURE_FXSR))
 		return -ENODEV;
 
-	fpu__activate_fpstate_write(fpu);
+	fpu__prepare_write(fpu);
 	fpstate_sanitize_xstate(fpu);
 
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
@@ -89,10 +89,13 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
 
 	xsave = &fpu->state.xsave;
 
-	fpu__activate_fpstate_read(fpu);
+	fpu__prepare_read(fpu);
 
 	if (using_compacted_format()) {
-		ret = copyout_from_xsaves(pos, count, kbuf, ubuf, xsave);
+		if (kbuf)
+			ret = copy_xstate_to_kernel(kbuf, xsave, pos, count);
+		else
+			ret = copy_xstate_to_user(ubuf, xsave, pos, count);
 	} else {
 		fpstate_sanitize_xstate(fpu);
 		/*
@@ -129,28 +132,29 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
 
 	xsave = &fpu->state.xsave;
 
-	fpu__activate_fpstate_write(fpu);
+	fpu__prepare_write(fpu);
 
-	if (boot_cpu_has(X86_FEATURE_XSAVES))
-		ret = copyin_to_xsaves(kbuf, ubuf, xsave);
-	else
+	if (using_compacted_format()) {
+		if (kbuf)
+			ret = copy_kernel_to_xstate(xsave, kbuf);
+		else
+			ret = copy_user_to_xstate(xsave, ubuf);
+	} else {
 		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
-
-	/*
-	 * In case of failure, mark all states as init:
-	 */
-	if (ret)
-		fpstate_init(&fpu->state);
+		if (!ret)
+			ret = validate_xstate_header(&xsave->header);
+	}
 
 	/*
 	 * mxcsr reserved bits must be masked to zero for security reasons.
 	 */
 	xsave->i387.mxcsr &= mxcsr_feature_mask;
-	xsave->header.xfeatures &= xfeatures_mask;
+
 	/*
-	 * These bits must be zero.
+	 * In case of failure, mark all states as init:
 	 */
-	memset(&xsave->header.reserved, 0, 48);
+	if (ret)
+		fpstate_init(&fpu->state);
 
 	return ret;
 }
@@ -299,7 +303,7 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
 	struct fpu *fpu = &target->thread.fpu;
 	struct user_i387_ia32_struct env;
 
-	fpu__activate_fpstate_read(fpu);
+	fpu__prepare_read(fpu);
 
 	if (!boot_cpu_has(X86_FEATURE_FPU))
 		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);
@@ -329,7 +333,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
 	struct user_i387_ia32_struct env;
 	int ret;
 
-	fpu__activate_fpstate_write(fpu);
+	fpu__prepare_write(fpu);
 	fpstate_sanitize_xstate(fpu);
 
 	if (!boot_cpu_has(X86_FEATURE_FPU))
@@ -369,7 +373,7 @@ int dump_fpu(struct pt_regs *regs, struct user_i387_struct *ufpu)
 	struct fpu *fpu = &tsk->thread.fpu;
 	int fpvalid;
 
-	fpvalid = fpu->fpstate_active;
+	fpvalid = fpu->initialized;
 	if (fpvalid)
 		fpvalid = !fpregs_get(tsk, NULL,
 				      0, sizeof(struct user_i387_ia32_struct),
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index 83c23c230b4c..fb639e70048f 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -155,7 +155,8 @@ static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf)
 */
 int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
 {
-	struct xregs_state *xsave = &current->thread.fpu.state.xsave;
+	struct fpu *fpu = &current->thread.fpu;
+	struct xregs_state *xsave = &fpu->state.xsave;
 	struct task_struct *tsk = current;
 	int ia32_fxstate = (buf != buf_fx);
 
@@ -170,13 +171,13 @@ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
 			sizeof(struct user_i387_ia32_struct), NULL,
 			(struct _fpstate_32 __user *) buf) ? -1 : 1;
 
-	if (fpregs_active() || using_compacted_format()) {
+	if (fpu->initialized || using_compacted_format()) {
 		/* Save the live register state to the user directly. */
 		if (copy_fpregs_to_sigframe(buf_fx))
 			return -1;
 		/* Update the thread's fxstate to save the fsave header. */
 		if (ia32_fxstate)
-			copy_fxregs_to_kernel(&tsk->thread.fpu);
+			copy_fxregs_to_kernel(fpu);
 	} else {
 		/*
 		 * It is a *bug* if kernel uses compacted-format for xsave
@@ -189,7 +190,7 @@ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
 			return -1;
 		}
 
-		fpstate_sanitize_xstate(&tsk->thread.fpu);
+		fpstate_sanitize_xstate(fpu);
 		if (__copy_to_user(buf_fx, xsave, fpu_user_xstate_size))
 			return -1;
 	}
@@ -213,8 +214,11 @@ sanitize_restored_xstate(struct task_struct *tsk,
 	struct xstate_header *header = &xsave->header;
 
 	if (use_xsave()) {
-		/* These bits must be zero. */
-		memset(header->reserved, 0, 48);
+		/*
+		 * Note: we don't need to zero the reserved bits in the
+		 * xstate_header here because we either didn't copy them at all,
+		 * or we checked earlier that they aren't set.
+		 */
 
 		/*
 		 * Init the state that is not present in the memory
@@ -223,7 +227,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
 		if (fx_only)
 			header->xfeatures = XFEATURE_MASK_FPSSE;
 		else
-			header->xfeatures &= (xfeatures_mask & xfeatures);
+			header->xfeatures &= xfeatures;
 	}
 
 	if (use_fxsr()) {
@@ -279,7 +283,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
 	if (!access_ok(VERIFY_READ, buf, size))
 		return -EACCES;
 
-	fpu__activate_curr(fpu);
+	fpu__initialize(fpu);
 
 	if (!static_cpu_has(X86_FEATURE_FPU))
 		return fpregs_soft_set(current, NULL,
@@ -307,28 +311,29 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
 		/*
 		 * For 32-bit frames with fxstate, copy the user state to the
 		 * thread's fpu state, reconstruct fxstate from the fsave
-		 * header. Sanitize the copied state etc.
+		 * header. Validate and sanitize the copied state.
 		 */
 		struct fpu *fpu = &tsk->thread.fpu;
 		struct user_i387_ia32_struct env;
 		int err = 0;
 
 		/*
-		 * Drop the current fpu which clears fpu->fpstate_active. This ensures
+		 * Drop the current fpu which clears fpu->initialized. This ensures
 		 * that any context-switch during the copy of the new state,
 		 * avoids the intermediate state from getting restored/saved.
 		 * Thus avoiding the new restored state from getting corrupted.
 		 * We will be ready to restore/save the state only after
-		 * fpu->fpstate_active is again set.
+		 * fpu->initialized is again set.
 		 */
 		fpu__drop(fpu);
 
 		if (using_compacted_format()) {
-			err = copyin_to_xsaves(NULL, buf_fx,
-					       &fpu->state.xsave);
+			err = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
 		} else {
-			err = __copy_from_user(&fpu->state.xsave,
-					       buf_fx, state_size);
+			err = __copy_from_user(&fpu->state.xsave, buf_fx, state_size);
+
+			if (!err && state_size > offsetof(struct xregs_state, header))
+				err = validate_xstate_header(&fpu->state.xsave.header);
 		}
 
 		if (err || __copy_from_user(&env, buf, sizeof(env))) {
@@ -339,7 +344,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
 			sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
 		}
 
-		fpu->fpstate_active = 1;
+		fpu->initialized = 1;
 		preempt_disable();
 		fpu__restore(fpu);
 		preempt_enable();
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index c24ac1efb12d..f1d5476c9022 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -483,6 +483,30 @@ int using_compacted_format(void)
 	return boot_cpu_has(X86_FEATURE_XSAVES);
 }
 
+/* Validate an xstate header supplied by userspace (ptrace or sigreturn) */
+int validate_xstate_header(const struct xstate_header *hdr)
+{
+	/* No unknown or supervisor features may be set */
+	if (hdr->xfeatures & (~xfeatures_mask | XFEATURE_MASK_SUPERVISOR))
+		return -EINVAL;
+
+	/* Userspace must use the uncompacted format */
+	if (hdr->xcomp_bv)
+		return -EINVAL;
+
+	/*
+	 * If 'reserved' is shrunken to add a new field, make sure to validate
+	 * that new field here!
+	 */
+	BUILD_BUG_ON(sizeof(hdr->reserved) != 48);
+
+	/* No reserved bits may be set */
+	if (memchr_inv(hdr->reserved, 0, sizeof(hdr->reserved)))
+		return -EINVAL;
+
+	return 0;
+}
+
 static void __xstate_dump_leaves(void)
 {
 	int i;
@@ -867,7 +891,7 @@ const void *get_xsave_field_ptr(int xsave_state)
 {
 	struct fpu *fpu = &current->thread.fpu;
 
-	if (!fpu->fpstate_active)
+	if (!fpu->initialized)
 		return NULL;
 	/*
 	 * fpu__save() takes the CPU's xstate registers
@@ -921,38 +945,129 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
 #endif /* ! CONFIG_ARCH_HAS_PKEYS */
 
 /*
+ * Weird legacy quirk: SSE and YMM states store information in the
+ * MXCSR and MXCSR_FLAGS fields of the FP area. That means if the FP
+ * area is marked as unused in the xfeatures header, we need to copy
+ * MXCSR and MXCSR_FLAGS if either SSE or YMM are in use.
+ */
+static inline bool xfeatures_mxcsr_quirk(u64 xfeatures)
+{
+	if (!(xfeatures & (XFEATURE_MASK_SSE|XFEATURE_MASK_YMM)))
+		return false;
+
+	if (xfeatures & XFEATURE_MASK_FP)
+		return false;
+
+	return true;
+}
+
+/*
 * This is similar to user_regset_copyout(), but will not add offset to
 * the source data pointer or increment pos, count, kbuf, and ubuf.
 */
-static inline int xstate_copyout(unsigned int pos, unsigned int count,
-				 void *kbuf, void __user *ubuf,
-				 const void *data, const int start_pos,
-				 const int end_pos)
+static inline void
+__copy_xstate_to_kernel(void *kbuf, const void *data,
+			unsigned int offset, unsigned int size, unsigned int size_total)
 {
-	if ((count == 0) || (pos < start_pos))
-		return 0;
+	if (offset < size_total) {
+		unsigned int copy = min(size, size_total - offset);
 
-	if (end_pos < 0 || pos < end_pos) {
-		unsigned int copy = (end_pos < 0 ? count : min(count, end_pos - pos));
+		memcpy(kbuf + offset, data, copy);
+	}
+}
 
-		if (kbuf) {
-			memcpy(kbuf + pos, data, copy);
-		} else {
-			if (__copy_to_user(ubuf + pos, data, copy))
-				return -EFAULT;
+/*
+ * Convert from kernel XSAVES compacted format to standard format and copy
+ * to a kernel-space ptrace buffer.
+ *
+ * It supports partial copy but pos always starts from zero. This is called
+ * from xstateregs_get() and there we check the CPU has XSAVES.
+ */
+int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset_start, unsigned int size_total)
+{
+	unsigned int offset, size;
+	struct xstate_header header;
+	int i;
+
+	/*
+	 * Currently copy_regset_to_user() starts from pos 0:
+	 */
+	if (unlikely(offset_start != 0))
+		return -EFAULT;
+
+	/*
+	 * The destination is a ptrace buffer; we put in only user xstates:
+	 */
+	memset(&header, 0, sizeof(header));
+	header.xfeatures = xsave->header.xfeatures;
+	header.xfeatures &= ~XFEATURE_MASK_SUPERVISOR;
+
+	/*
+	 * Copy xregs_state->header:
+	 */
+	offset = offsetof(struct xregs_state, header);
+	size = sizeof(header);
+
+	__copy_xstate_to_kernel(kbuf, &header, offset, size, size_total);
+
+	for (i = 0; i < XFEATURE_MAX; i++) {
+		/*
+		 * Copy only in-use xstates:
+		 */
+		if ((header.xfeatures >> i) & 1) {
+			void *src = __raw_xsave_addr(xsave, 1 << i);
+
+			offset = xstate_offsets[i];
+			size = xstate_sizes[i];
+
+			/* The next component has to fit fully into the output buffer: */
+			if (offset + size > size_total)
+				break;
+
+			__copy_xstate_to_kernel(kbuf, src, offset, size, size_total);
 		}
+
+	}
+
+	if (xfeatures_mxcsr_quirk(header.xfeatures)) {
+		offset = offsetof(struct fxregs_state, mxcsr);
+		size = MXCSR_AND_FLAGS_SIZE;
+		__copy_xstate_to_kernel(kbuf, &xsave->i387.mxcsr, offset, size, size_total);
+	}
+
+	/*
+	 * Fill xsave->i387.sw_reserved value for ptrace frame:
+	 */
+	offset = offsetof(struct fxregs_state, sw_reserved);
+	size = sizeof(xstate_fx_sw_bytes);
+
+	__copy_xstate_to_kernel(kbuf, xstate_fx_sw_bytes, offset, size, size_total);
+
+	return 0;
+}
+
+static inline int
+__copy_xstate_to_user(void __user *ubuf, const void *data, unsigned int offset, unsigned int size, unsigned int size_total)
+{
+	if (!size)
+		return 0;
+
+	if (offset < size_total) {
+		unsigned int copy = min(size, size_total - offset);
+
+		if (__copy_to_user(ubuf + offset, data, copy))
+			return -EFAULT;
 	}
 	return 0;
 }
 
 /*
 * Convert from kernel XSAVES compacted format to standard format and copy
- * to a ptrace buffer. It supports partial copy but pos always starts from
+ * to a user-space buffer. It supports partial copy but pos always starts from
 * zero. This is called from xstateregs_get() and there we check the CPU
 * has XSAVES.
 */
-int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf,
-			void __user *ubuf, struct xregs_state *xsave)
+int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned int offset_start, unsigned int size_total)
 {
 	unsigned int offset, size;
 	int ret, i;
@@ -961,7 +1076,7 @@ int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned
 	/*
 	 * Currently copy_regset_to_user() starts from pos 0:
 	 */
-	if (unlikely(pos != 0))
+	if (unlikely(offset_start != 0))
 		return -EFAULT;
 
 	/*
@@ -977,8 +1092,7 @@ int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned
 	offset = offsetof(struct xregs_state, header);
 	size = sizeof(header);
 
-	ret = xstate_copyout(offset, size, kbuf, ubuf, &header, 0, count);
-
+	ret = __copy_xstate_to_user(ubuf, &header, offset, size, size_total);
 	if (ret)
 		return ret;
 
@@ -992,25 +1106,30 @@ int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned
 			offset = xstate_offsets[i];
 			size = xstate_sizes[i];
 
-			ret = xstate_copyout(offset, size, kbuf, ubuf, src, 0, count);
+			/* The next component has to fit fully into the output buffer: */
+			if (offset + size > size_total)
+				break;
 
+			ret = __copy_xstate_to_user(ubuf, src, offset, size, size_total);
 			if (ret)
 				return ret;
-
-			if (offset + size >= count)
-				break;
 		}
 
 	}
 
+	if (xfeatures_mxcsr_quirk(header.xfeatures)) {
+		offset = offsetof(struct fxregs_state, mxcsr);
+		size = MXCSR_AND_FLAGS_SIZE;
+		__copy_xstate_to_user(ubuf, &xsave->i387.mxcsr, offset, size, size_total);
+	}
+
 	/*
 	 * Fill xsave->i387.sw_reserved value for ptrace frame:
 	 */
 	offset = offsetof(struct fxregs_state, sw_reserved);
 	size = sizeof(xstate_fx_sw_bytes);
 
-	ret = xstate_copyout(offset, size, kbuf, ubuf, xstate_fx_sw_bytes, 0, count);
-
+	ret = __copy_xstate_to_user(ubuf, xstate_fx_sw_bytes, offset, size, size_total);
 	if (ret)
 		return ret;
 
@@ -1018,55 +1137,98 @@ int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned
 }
 
 /*
- * Convert from a ptrace standard-format buffer to kernel XSAVES format
- * and copy to the target thread. This is called from xstateregs_set() and
- * there we check the CPU has XSAVES and a whole standard-sized buffer
- * exists.
+ * Convert from a ptrace standard-format kernel buffer to kernel XSAVES format
+ * and copy to the target thread. This is called from xstateregs_set().
 */
-int copyin_to_xsaves(const void *kbuf, const void __user *ubuf,
-		     struct xregs_state *xsave)
+int copy_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf)
 {
 	unsigned int offset, size;
 	int i;
-	u64 xfeatures;
-	u64 allowed_features;
+	struct xstate_header hdr;
 
 	offset = offsetof(struct xregs_state, header);
-	size = sizeof(xfeatures);
+	size = sizeof(hdr);
 
-	if (kbuf) {
-		memcpy(&xfeatures, kbuf + offset, size);
-	} else {
-		if (__copy_from_user(&xfeatures, ubuf + offset, size))
-			return -EFAULT;
+	memcpy(&hdr, kbuf + offset, size);
+
+	if (validate_xstate_header(&hdr))
+		return -EINVAL;
+
+	for (i = 0; i < XFEATURE_MAX; i++) {
+		u64 mask = ((u64)1 << i);
+
+		if (hdr.xfeatures & mask) {
+			void *dst = __raw_xsave_addr(xsave, 1 << i);
+
+			offset = xstate_offsets[i];
+			size = xstate_sizes[i];
+
+			memcpy(dst, kbuf + offset, size);
+		}
+	}
+
+	if (xfeatures_mxcsr_quirk(hdr.xfeatures)) {
+		offset = offsetof(struct fxregs_state, mxcsr);
+		size = MXCSR_AND_FLAGS_SIZE;
+		memcpy(&xsave->i387.mxcsr, kbuf + offset, size);
 	}
 
 	/*
-	 * Reject if the user sets any disabled or supervisor features:
+	 * The state that came in from userspace was user-state only.
+	 * Mask all the user states out of 'xfeatures':
+	 */
+	xsave->header.xfeatures &= XFEATURE_MASK_SUPERVISOR;
+
+	/*
+	 * Add back in the features that came in from userspace:
 	 */
-	allowed_features = xfeatures_mask & ~XFEATURE_MASK_SUPERVISOR;
+	xsave->header.xfeatures |= hdr.xfeatures;
 
-	if (xfeatures & ~allowed_features)
+	return 0;
+}
+
+/*
+ * Convert from a ptrace or sigreturn standard-format user-space buffer to
+ * kernel XSAVES format and copy to the target thread. This is called from
+ * xstateregs_set(), as well as potentially from the sigreturn() and
+ * rt_sigreturn() system calls.
+ */
+int copy_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf)
+{
+	unsigned int offset, size;
+	int i;
+	struct xstate_header hdr;
+
+	offset = offsetof(struct xregs_state, header);
+	size = sizeof(hdr);
+
+	if (__copy_from_user(&hdr, ubuf + offset, size))
+		return -EFAULT;
+
+	if (validate_xstate_header(&hdr))
 		return -EINVAL;
 
 	for (i = 0; i < XFEATURE_MAX; i++) {
 		u64 mask = ((u64)1 << i);
 
-		if (xfeatures & mask) {
+		if (hdr.xfeatures & mask) {
 			void *dst = __raw_xsave_addr(xsave, 1 << i);
 
 			offset = xstate_offsets[i];
 			size = xstate_sizes[i];
 
-			if (kbuf) {
-				memcpy(dst, kbuf + offset, size);
-			} else {
-				if (__copy_from_user(dst, ubuf + offset, size))
-					return -EFAULT;
-			}
+			if (__copy_from_user(dst, ubuf + offset, size))
+				return -EFAULT;
 		}
 	}
 
+	if (xfeatures_mxcsr_quirk(hdr.xfeatures)) {
+		offset = offsetof(struct fxregs_state, mxcsr);
+		size = MXCSR_AND_FLAGS_SIZE;
+		if (__copy_from_user(&xsave->i387.mxcsr, ubuf + offset, size))
+			return -EFAULT;
+	}
+
 	/*
 	 * The state that came in from userspace was user-state only.
 	 * Mask all the user states out of 'xfeatures':
@@ -1076,7 +1238,7 @@ int copy_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf)
 	/*
 	 * Add back in the features that came in from userspace:
 	 */
-	xsave->header.xfeatures |= xfeatures;
+	xsave->header.xfeatures |= hdr.xfeatures;
 
 	return 0;
 }
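
Both new setters share one shape: read the xstate header, reject it via
validate_xstate_header() before touching the target fpstate, copy per-feature
state, apply the MXCSR quirk, then merge the user feature bits under the
supervisor mask. In practice a forged PTRACE_SETREGSET or sigreturn payload
that claims the compacted format, or sets reserved bits, is now refused up
front. A tiny sketch of the check (the function name is illustrative):

	/* Sketch: payloads with xcomp_bv set now fail validation. */
	static bool example_reject(struct xstate_header *hdr)
	{
		hdr->xcomp_bv = 1;	/* claim compacted format */
		return validate_xstate_header(hdr) == -EINVAL;	/* true */
	}
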
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index e04442345fc0..4e188fda5961 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -263,7 +263,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
 		sp = (unsigned long) ka->sa.sa_restorer;
 	}
 
-	if (fpu->fpstate_active) {
+	if (fpu->initialized) {
 		sp = fpu__alloc_mathframe(sp, IS_ENABLED(CONFIG_X86_32),
 					  &buf_fx, &math_size);
 		*fpstate = (void __user *)sp;
@@ -279,7 +279,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
 		return (void __user *)-1L;
 
 	/* save i387 and extended state */
-	if (fpu->fpstate_active &&
+	if (fpu->initialized &&
 	    copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size) < 0)
 		return (void __user *)-1L;
 
@@ -755,7 +755,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 		/*
 		 * Ensure the signal handler starts with the new fpu state.
 		 */
-		if (fpu->fpstate_active)
+		if (fpu->initialized)
 			fpu__clear(fpu);
 	}
 	signal_setup_done(failed, ksig, stepping);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index cd17b7d9a107..03869eb7fcd6 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7225,7 +7225,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	int r;
 	sigset_t sigsaved;
 
-	fpu__activate_curr(fpu);
+	fpu__initialize(fpu);
 
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
index d4a7df2205b8..220638a4cb94 100644
--- a/arch/x86/math-emu/fpu_entry.c
+++ b/arch/x86/math-emu/fpu_entry.c
@@ -114,7 +114,7 @@ void math_emulate(struct math_emu_info *info)
 	struct desc_struct code_descriptor;
 	struct fpu *fpu = &current->thread.fpu;
 
-	fpu__activate_curr(fpu);
+	fpu__initialize(fpu);
 
 #ifdef RE_ENTRANT_CHECKING
 	if (emulating) {
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index c076f710de4c..c3521e2be396 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -2,6 +2,7 @@
 #include <linux/uaccess.h>
 #include <linux/sched/debug.h>
 
+#include <asm/fpu/internal.h>
 #include <asm/traps.h>
 #include <asm/kdebug.h>
 
@@ -78,6 +79,29 @@ bool ex_handler_refcount(const struct exception_table_entry *fixup,
 }
 EXPORT_SYMBOL_GPL(ex_handler_refcount);
 
+/*
+ * Handler for when we fail to restore a task's FPU state. We should never get
+ * here because the FPU state of a task using the FPU (task->thread.fpu.state)
+ * should always be valid. However, past bugs have allowed userspace to set
+ * reserved bits in the XSAVE area using PTRACE_SETREGSET or sys_rt_sigreturn().
+ * These caused XRSTOR to fail when switching to the task, leaking the FPU
+ * registers of the task previously executing on the CPU. Mitigate this class
+ * of vulnerability by restoring from the initial state (essentially, zeroing
+ * out all the FPU registers) if we can't restore from the task's FPU state.
+ */
+bool ex_handler_fprestore(const struct exception_table_entry *fixup,
+			  struct pt_regs *regs, int trapnr)
+{
+	regs->ip = ex_fixup_addr(fixup);
+
+	WARN_ONCE(1, "Bad FPU state detected at %pB, reinitializing FPU registers.",
+		  (void *)instruction_pointer(regs));
+
+	__copy_kernel_to_fpregs(&init_fpstate, -1);
+	return true;
+}
+EXPORT_SYMBOL_GPL(ex_handler_fprestore);
+
 bool ex_handler_ext(const struct exception_table_entry *fixup,
 		    struct pt_regs *regs, int trapnr)
 {
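
This handler is the backstop for the _ASM_EXTABLE_HANDLE annotations added to
kernel_insn() and XSTATE_XRESTORE in fpu/internal.h above. The resulting
control flow on a bad restore, summarized as a comment (an outline, not code
from the commit):

	/*
	 * copy_kernel_to_xregs()
	 *   -> XRSTOR faults (e.g. reserved bits set in the xstate header)
	 *   -> exception table lookup finds the _ASM_EXTABLE_HANDLE entry
	 *   -> ex_handler_fprestore(): WARN_ONCE, then load init_fpstate
	 *   -> execution resumes past the restore with clean FPU registers
	 */
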
diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c
index 2dab69a706ec..d7bc0eea20a5 100644
--- a/arch/x86/mm/pkeys.c
+++ b/arch/x86/mm/pkeys.c
@@ -18,7 +18,6 @@
 
 #include <asm/cpufeature.h>		/* boot_cpu_has, ...		*/
 #include <asm/mmu_context.h>		/* vma_pkey()			*/
-#include <asm/fpu/internal.h>		/* fpregs_active()		*/
 
 int __execute_only_pkey(struct mm_struct *mm)
 {
@@ -45,7 +44,7 @@ int __execute_only_pkey(struct mm_struct *mm)
 	 */
 	preempt_disable();
 	if (!need_to_set_mm_pkey &&
-	    fpregs_active() &&
+	    current->thread.fpu.initialized &&
 	    !__pkru_allows_read(read_pkru(), execute_only_pkey)) {
 		preempt_enable();
 		return execute_only_pkey;