| author | Sebastian Andrzej Siewior <bigeasy@linutronix.de> | 2018-11-29 10:02:10 -0500 |
|---|---|---|
| committer | Borislav Petkov <bp@suse.de> | 2018-12-04 06:37:28 -0500 |
| commit | 12209993e98c5fa1855c467f22a24e3d5b8be205 | |
| tree | 73e4a7b5e6e48b1562b8db90523ade974db242cd | |
| parent | 2f2fcc40a961ed04f0e130803fbaa868c2899310 | |
x86/fpu: Don't export __kernel_fpu_{begin,end}()
There is one user of __kernel_fpu_begin(), and it calls preempt_disable()
right before invoking it, so it could simply use kernel_fpu_begin()
instead. The 32-bit versions of arch_efi_call_virt_setup() and
arch_efi_call_virt_teardown() already do this.
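
For illustration, a minimal sketch of that simplification, with the EFI-specific work elided; the surrounding helper names are hypothetical, only the FPU calls are taken from the patch:

```c
/* Before: caller open-codes preemption handling around the low-level helpers. */
static void firmware_call_old(void)
{
	preempt_disable();
	__kernel_fpu_begin();
	/* ... call into firmware ... */
	__kernel_fpu_end();
	preempt_enable();
}

/* After: kernel_fpu_begin()/end() disable and re-enable preemption themselves. */
static void firmware_call_new(void)
{
	kernel_fpu_begin();
	/* ... call into firmware ... */
	kernel_fpu_end();
}
```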
The comment above the *kernel_fpu*() helpers claims that preemption must be
disabled before invoking __kernel_fpu_begin() and cites KVM as a good
example of doing so. However, KVM has not done that since commit
f775b13eedee2 ("x86,kvm: move qemu/guest FPU switching out to vcpu_run"),
so it is no longer an example.
With EFI gone as the last user of __kernel_fpu_{begin|end}(), both can
be made static and not exported anymore.
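
With the helpers static, the only way to reach them is through the public wrappers. As a rough sketch of what those wrappers look like around this point in arch/x86/kernel/fpu/core.c (simplified; exact details may differ):

```c
void kernel_fpu_begin(void)
{
	preempt_disable();
	__kernel_fpu_begin();	/* now static, no longer exported */
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin);

void kernel_fpu_end(void)
{
	__kernel_fpu_end();	/* now static, no longer exported */
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);
```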
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Rik van Riel <riel@surriel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: "Jason A. Donenfeld" <Jason@zx2c4.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Nicolai Stange <nstange@suse.de>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: kvm ML <kvm@vger.kernel.org>
Cc: linux-efi <linux-efi@vger.kernel.org>
Cc: x86-ml <x86@kernel.org>
Link: https://lkml.kernel.org/r/20181129150210.2k4mawt37ow6c2vq@linutronix.de
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | arch/x86/include/asm/efi.h | 6 |
| -rw-r--r-- | arch/x86/include/asm/fpu/api.h | 15 |
| -rw-r--r-- | arch/x86/kernel/fpu/core.c | 6 |

3 files changed, 9 insertions, 18 deletions
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index eea40d52ca78..45864898f7e5 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -82,8 +82,7 @@ struct efi_scratch {
 #define arch_efi_call_virt_setup()				\
 ({									\
 	efi_sync_low_kernel_mappings();				\
-	preempt_disable();						\
-	__kernel_fpu_begin();					\
+	kernel_fpu_begin();						\
 	firmware_restrict_branch_speculation_start();		\
 									\
 	if (!efi_enabled(EFI_OLD_MEMMAP))				\
@@ -99,8 +98,7 @@ struct efi_scratch {
 	efi_switch_mm(efi_scratch.prev_mm);			\
 									\
 	firmware_restrict_branch_speculation_end();		\
-	__kernel_fpu_end();						\
-	preempt_enable();						\
+	kernel_fpu_end();						\
 })
 
 extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
index a9caac9d4a72..b56d504af654 100644
--- a/arch/x86/include/asm/fpu/api.h
+++ b/arch/x86/include/asm/fpu/api.h
@@ -12,17 +12,12 @@
 #define _ASM_X86_FPU_API_H
 
 /*
- * Careful: __kernel_fpu_begin/end() must be called with preempt disabled
- * and they don't touch the preempt state on their own.
- * If you enable preemption after __kernel_fpu_begin(), preempt notifier
- * should call the __kernel_fpu_end() to prevent the kernel/user FPU
- * state from getting corrupted. KVM for example uses this model.
- *
- * All other cases use kernel_fpu_begin/end() which disable preemption
- * during kernel FPU usage.
+ * Use kernel_fpu_begin/end() if you intend to use FPU in kernel context. It
+ * disables preemption so be careful if you intend to use it for long periods
+ * of time.
+ * If you intend to use the FPU in softirq you need to check first with
+ * irq_fpu_usable() if it is possible.
  */
-extern void __kernel_fpu_begin(void);
-extern void __kernel_fpu_end(void);
 extern void kernel_fpu_begin(void);
 extern void kernel_fpu_end(void);
 extern bool irq_fpu_usable(void);
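
The replacement comment points callers that may run in softirq context at irq_fpu_usable(). A minimal sketch of that usage pattern; the function and its fallback path are hypothetical, only kernel_fpu_begin()/end() and irq_fpu_usable() come from this API:

```c
static void checksum_buffer(const void *buf, size_t len)
{
	if (irq_fpu_usable()) {
		kernel_fpu_begin();
		/* ... SIMD-accelerated path using FPU/vector registers ... */
		kernel_fpu_end();
	} else {
		/* ... scalar fallback, safe when the FPU cannot be used ... */
	}
}
```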
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 2ea85b32421a..2e5003fef51a 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -93,7 +93,7 @@ bool irq_fpu_usable(void)
 }
 EXPORT_SYMBOL(irq_fpu_usable);
 
-void __kernel_fpu_begin(void)
+static void __kernel_fpu_begin(void)
 {
 	struct fpu *fpu = &current->thread.fpu;
 
@@ -111,9 +111,8 @@ void __kernel_fpu_begin(void)
 		__cpu_invalidate_fpregs_state();
 	}
 }
-EXPORT_SYMBOL(__kernel_fpu_begin);
 
-void __kernel_fpu_end(void)
+static void __kernel_fpu_end(void)
 {
 	struct fpu *fpu = &current->thread.fpu;
 
@@ -122,7 +121,6 @@ void __kernel_fpu_end(void)
 
 	kernel_fpu_enable();
 }
-EXPORT_SYMBOL(__kernel_fpu_end);
 
 void kernel_fpu_begin(void)
 {
