Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/include/asm/efi.h           |  6 ++----
-rw-r--r--	arch/x86/include/asm/fpu/api.h       | 15 +++++----------
-rw-r--r--	arch/x86/include/asm/fpu/internal.h  |  3 +++
-rw-r--r--	arch/x86/include/asm/thread_info.h   |  8 --------
-rw-r--r--	arch/x86/kernel/fpu/core.c           |  6 ++----
-rw-r--r--	arch/x86/kernel/fpu/xstate.c         |  4 +---
-rw-r--r--	arch/x86/kernel/process_32.c         |  3 ---
-rw-r--r--	arch/x86/mm/pkeys.c                  |  1 +
8 files changed, 14 insertions(+), 32 deletions(-)
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 42982a6cc6cf..107283b1eb1e 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -82,8 +82,7 @@ struct efi_scratch {
 #define arch_efi_call_virt_setup()					\
 ({									\
 	efi_sync_low_kernel_mappings();					\
-	preempt_disable();						\
-	__kernel_fpu_begin();						\
+	kernel_fpu_begin();						\
 	firmware_restrict_branch_speculation_start();			\
 									\
 	if (!efi_enabled(EFI_OLD_MEMMAP))				\
@@ -99,8 +98,7 @@ struct efi_scratch {
 		efi_switch_mm(efi_scratch.prev_mm);			\
 									\
 	firmware_restrict_branch_speculation_end();			\
-	__kernel_fpu_end();						\
-	preempt_enable();						\
+	kernel_fpu_end();						\
 })
 
 extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
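
The preempt_disable()/__kernel_fpu_begin() pair dropped here is not lost: kernel_fpu_begin() and kernel_fpu_end() already perform it internally, so the macro's behaviour is unchanged. Roughly, from arch/x86/kernel/fpu/core.c at this point in the series (quoted from memory for context, not part of this hunk):

	void kernel_fpu_begin(void)
	{
		preempt_disable();
		__kernel_fpu_begin();
	}
	EXPORT_SYMBOL_GPL(kernel_fpu_begin);

	void kernel_fpu_end(void)
	{
		__kernel_fpu_end();
		preempt_enable();
	}
	EXPORT_SYMBOL_GPL(kernel_fpu_end);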
diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
index a9caac9d4a72..b56d504af654 100644
--- a/arch/x86/include/asm/fpu/api.h
+++ b/arch/x86/include/asm/fpu/api.h
@@ -12,17 +12,12 @@
 #define _ASM_X86_FPU_API_H
 
 /*
- * Careful: __kernel_fpu_begin/end() must be called with preempt disabled
- * and they don't touch the preempt state on their own.
- * If you enable preemption after __kernel_fpu_begin(), preempt notifier
- * should call the __kernel_fpu_end() to prevent the kernel/user FPU
- * state from getting corrupted. KVM for example uses this model.
- *
- * All other cases use kernel_fpu_begin/end() which disable preemption
- * during kernel FPU usage.
+ * Use kernel_fpu_begin/end() if you intend to use FPU in kernel context. It
+ * disables preemption so be careful if you intend to use it for long periods
+ * of time.
+ * If you intend to use the FPU in softirq you need to check first with
+ * irq_fpu_usable() if it is possible.
  */
-extern void __kernel_fpu_begin(void);
-extern void __kernel_fpu_end(void);
 extern void kernel_fpu_begin(void);
 extern void kernel_fpu_end(void);
 extern bool irq_fpu_usable(void);
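
A minimal usage sketch matching the new comment: plain kernel context simply brackets FPU use with kernel_fpu_begin()/kernel_fpu_end(), while softirq or interrupt context must check irq_fpu_usable() first. The context type and the helpers crc_simd()/crc_generic() are hypothetical placeholders, not kernel APIs:

	static void crc_update(struct crc_ctx *ctx, const u8 *data, unsigned int len)
	{
		if (!irq_fpu_usable()) {
			/* FPU not usable here (e.g. interrupted kernel FPU use): scalar fallback */
			crc_generic(ctx, data, len);
			return;
		}

		kernel_fpu_begin();		/* preemption is disabled from here on */
		crc_simd(ctx, data, len);	/* free to clobber SSE/AVX state */
		kernel_fpu_end();
	}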
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 69dcdf195b61..fa2c93cb42a2 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -106,6 +106,9 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu);
 #define user_insn(insn, output, input...)				\
 ({									\
 	int err;							\
+									\
+	might_fault();							\
+									\
 	asm volatile(ASM_STAC "\n"					\
 		     "1:" #insn "\n\t"					\
 		     "2: " ASM_CLAC "\n"				\
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 82b73b75d67c..e0eccbcb8447 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -140,14 +140,6 @@ struct thread_info {
 				 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT |	\
 				 _TIF_NOHZ)
 
-/* work to do on any return to user space */
-#define _TIF_ALLWORK_MASK						\
-	(_TIF_SYSCALL_TRACE | _TIF_NOTIFY_RESUME | _TIF_SIGPENDING |	\
-	 _TIF_NEED_RESCHED | _TIF_SINGLESTEP | _TIF_SYSCALL_EMU |	\
-	 _TIF_SYSCALL_AUDIT | _TIF_USER_RETURN_NOTIFY | _TIF_UPROBE |	\
-	 _TIF_PATCH_PENDING | _TIF_NOHZ | _TIF_SYSCALL_TRACEPOINT |	\
-	 _TIF_FSCHECK)
-
 /* flags to check in __switch_to() */
 #define _TIF_WORK_CTXSW_BASE						\
 	(_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|	\
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 2ea85b32421a..2e5003fef51a 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -93,7 +93,7 @@ bool irq_fpu_usable(void)
 }
 EXPORT_SYMBOL(irq_fpu_usable);
 
-void __kernel_fpu_begin(void)
+static void __kernel_fpu_begin(void)
 {
 	struct fpu *fpu = &current->thread.fpu;
 
@@ -111,9 +111,8 @@ void __kernel_fpu_begin(void)
 		__cpu_invalidate_fpregs_state();
 	}
 }
-EXPORT_SYMBOL(__kernel_fpu_begin);
 
-void __kernel_fpu_end(void)
+static void __kernel_fpu_end(void)
 {
 	struct fpu *fpu = &current->thread.fpu;
 
@@ -122,7 +121,6 @@ void __kernel_fpu_end(void)
 
 	kernel_fpu_enable();
 }
-EXPORT_SYMBOL(__kernel_fpu_end);
 
 void kernel_fpu_begin(void)
 {
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index cd3956fc8158..9cc108456d0b 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -444,7 +444,7 @@ static int xfeature_uncompacted_offset(int xfeature_nr)
 	 * format. Checking a supervisor state's uncompacted offset is
 	 * an error.
 	 */
-	if (XFEATURE_MASK_SUPERVISOR & (1 << xfeature_nr)) {
+	if (XFEATURE_MASK_SUPERVISOR & BIT_ULL(xfeature_nr)) {
 		WARN_ONCE(1, "No fixed offset for xstate %d\n", xfeature_nr);
 		return -1;
 	}
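
The switch to BIT_ULL() matters because the xfeature masks are 64-bit while (1 << xfeature_nr) is evaluated as a 32-bit int shift, which is undefined once the feature number reaches 32. A small standalone illustration of the difference (ordinary userspace C, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	#define BIT_ULL(nr)	(1ULL << (nr))

	int main(void)
	{
		int nr = 34;

		/* (1 << nr) would be an int shift: undefined behaviour for nr >= 32 */
		uint64_t wide = BIT_ULL(nr);	/* well-defined: 0x400000000 */

		printf("bit %d -> %#llx\n", nr, (unsigned long long)wide);
		return 0;
	}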
@@ -808,8 +808,6 @@ void fpu__resume_cpu(void)
  * Given an xstate feature mask, calculate where in the xsave
  * buffer the state is.  Callers should ensure that the buffer
  * is valid.
- *
- * Note: does not work for compacted buffers.
  */
 static void *__raw_xsave_addr(struct xregs_state *xsave, int xstate_feature_mask)
 {
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 9d08f0510620..e471d8e6f0b2 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -44,9 +44,6 @@
 #include <asm/processor.h>
 #include <asm/fpu/internal.h>
 #include <asm/desc.h>
-#ifdef CONFIG_MATH_EMULATION
-#include <asm/math_emu.h>
-#endif
 
 #include <linux/err.h>
 
diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c
index 6e98e0a7c923..047a77f6a10c 100644
--- a/arch/x86/mm/pkeys.c
+++ b/arch/x86/mm/pkeys.c
@@ -131,6 +131,7 @@ int __arch_override_mprotect_pkey(struct vm_area_struct *vma, int prot, int pkey
 	 * in the process's lifetime will not accidentally get access
 	 * to data which is pkey-protected later on.
 	 */
+static
 u32 init_pkru_value = PKRU_AD_KEY( 1) | PKRU_AD_KEY( 2) | PKRU_AD_KEY( 3) |
 		      PKRU_AD_KEY( 4) | PKRU_AD_KEY( 5) | PKRU_AD_KEY( 6) |
 		      PKRU_AD_KEY( 7) | PKRU_AD_KEY( 8) | PKRU_AD_KEY( 9) |